From 6173d585df189a763256ed6dc4fcfb5aa26e5e5c Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 12 Jul 2022 11:26:25 +0100 Subject: 1.63.0rc1 --- CHANGES.md | 80 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/13028.misc | 1 - changelog.d/13029.doc | 1 - changelog.d/13031.feature | 1 - changelog.d/13032.doc | 1 - changelog.d/13044.misc | 1 - changelog.d/13077.doc | 3 -- changelog.d/13078.misc | 1 - changelog.d/13079.misc | 1 - changelog.d/13086.doc | 1 - changelog.d/13100.misc | 1 - changelog.d/13103.misc | 1 - changelog.d/13113.misc | 1 - changelog.d/13116.doc | 1 - changelog.d/13119.misc | 1 - changelog.d/13125.feature | 1 - changelog.d/13127.misc | 1 - changelog.d/13129.misc | 1 - changelog.d/13131.bugfix | 1 - changelog.d/13132.doc | 1 - changelog.d/13134.misc | 1 - changelog.d/13135.misc | 1 - changelog.d/13136.misc | 1 - changelog.d/13139.doc | 1 - changelog.d/13143.misc | 1 - changelog.d/13144.misc | 1 - changelog.d/13145.misc | 1 - changelog.d/13148.feature | 1 - changelog.d/13151.misc | 1 - changelog.d/13152.misc | 1 - changelog.d/13153.misc | 1 - changelog.d/13157.misc | 1 - changelog.d/13158.misc | 1 - changelog.d/13159.misc | 1 - changelog.d/13166.doc | 1 - changelog.d/13167.misc | 1 - changelog.d/13174.bugfix | 1 - changelog.d/13194.bugfix | 1 - changelog.d/13195.misc | 1 - changelog.d/13197.bugfix | 1 - changelog.d/13200.removal | 1 - changelog.d/13207.docker | 1 - changelog.d/13209.misc | 1 - changelog.d/13210.misc | 1 - changelog.d/13211.misc | 1 - changelog.d/13212.doc | 1 - changelog.d/13222.misc | 1 - changelog.d/13223.bugfix | 1 - changelog.d/13226.bugfix | 1 - changelog.d/13228.misc | 1 - changelog.d/13235.bugfix | 1 - changelog.d/13236.bugfix | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 54 files changed, 87 insertions(+), 54 deletions(-) delete mode 100644 changelog.d/13028.misc delete mode 100644 changelog.d/13029.doc delete mode 100644 changelog.d/13031.feature delete mode 100644 changelog.d/13032.doc delete mode 100644 changelog.d/13044.misc delete mode 100644 changelog.d/13077.doc delete mode 100644 changelog.d/13078.misc delete mode 100644 changelog.d/13079.misc delete mode 100644 changelog.d/13086.doc delete mode 100644 changelog.d/13100.misc delete mode 100644 changelog.d/13103.misc delete mode 100644 changelog.d/13113.misc delete mode 100644 changelog.d/13116.doc delete mode 100644 changelog.d/13119.misc delete mode 100644 changelog.d/13125.feature delete mode 100644 changelog.d/13127.misc delete mode 100644 changelog.d/13129.misc delete mode 100644 changelog.d/13131.bugfix delete mode 100644 changelog.d/13132.doc delete mode 100644 changelog.d/13134.misc delete mode 100644 changelog.d/13135.misc delete mode 100644 changelog.d/13136.misc delete mode 100644 changelog.d/13139.doc delete mode 100644 changelog.d/13143.misc delete mode 100644 changelog.d/13144.misc delete mode 100644 changelog.d/13145.misc delete mode 100644 changelog.d/13148.feature delete mode 100644 changelog.d/13151.misc delete mode 100644 changelog.d/13152.misc delete mode 100644 changelog.d/13153.misc delete mode 100644 changelog.d/13157.misc delete mode 100644 changelog.d/13158.misc delete mode 100644 changelog.d/13159.misc delete mode 100644 changelog.d/13166.doc delete mode 100644 changelog.d/13167.misc delete mode 100644 changelog.d/13174.bugfix delete mode 100644 changelog.d/13194.bugfix delete mode 100644 changelog.d/13195.misc delete mode 100644 changelog.d/13197.bugfix delete mode 100644 changelog.d/13200.removal delete mode 100644 changelog.d/13207.docker 
delete mode 100644 changelog.d/13209.misc delete mode 100644 changelog.d/13210.misc delete mode 100644 changelog.d/13211.misc delete mode 100644 changelog.d/13212.doc delete mode 100644 changelog.d/13222.misc delete mode 100644 changelog.d/13223.bugfix delete mode 100644 changelog.d/13226.bugfix delete mode 100644 changelog.d/13228.misc delete mode 100644 changelog.d/13235.bugfix delete mode 100644 changelog.d/13236.bugfix diff --git a/CHANGES.md b/CHANGES.md index ec27cda1b2..ee2f90632f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,83 @@ +Synapse 1.63.0rc1 (2022-07-12) +============================== + +Features +-------- + +- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) +- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125)) +- Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148)) + + +Bugfixes +-------- + +- Fix application service not being able to join remote federated room without a profile set. ([\#13131](https://github.com/matrix-org/synapse/issues/13131)) +- Make use of the more robust `get_current_state` in `_get_state_map_for_room` to avoid breakages. ([\#13174](https://github.com/matrix-org/synapse/issues/13174)) +- Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0. ([\#13194](https://github.com/matrix-org/synapse/issues/13194)) +- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197)) +- Fix bug where notification counts would get stuck after a highlighted message. Broke in v1.62.0. ([\#13223](https://github.com/matrix-org/synapse/issues/13223)) +- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226)) +- Fix MSC3202-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235)) +- Fix appservices not receiving room-less EDUs, like presence, if enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236)) + + +Updates to the Docker image +--------------------------- + +- Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1. ([\#13207](https://github.com/matrix-org/synapse/issues/13207)) + + +Improved Documentation +---------------------- + +- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029)) +- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032)) +- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077)) +- Add documentation for anonymised homeserver statistics collection. 
([\#13086](https://github.com/matrix-org/synapse/issues/13086)) +- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116)) +- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132)) +- Add a link to the configuration manual from the homeserver sample config documentation. ([\#13139](https://github.com/matrix-org/synapse/issues/13139)) +- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166)) +- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212)) + + +Deprecations and Removals +------------------------- + +- Remove obsolete and for 8 years unused `RoomEventsStoreTestCase`. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200)) + + +Internal Changes +---------------- + +- Add type annotations to `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028)) +- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044)) +- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222)) +- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157)) +- Faster room joins: Handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100)) +- Add missing type hints to `synapse.logging`. ([\#13103](https://github.com/matrix-org/synapse/issues/13103)) +- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113)) +- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153)) +- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127)) +- Only one-line SQL statements for logging and tracing. ([\#13129](https://github.com/matrix-org/synapse/issues/13129)) +- Apply ratelimiting earlier in processing of /send request. ([\#13134](https://github.com/matrix-org/synapse/issues/13134)) +- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135)) +- Add type annotations to `tests.server`. ([\#13136](https://github.com/matrix-org/synapse/issues/13136)) +- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158)) +- Faster joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144)) +- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145)) +- Faster room joins: fix race in recalculation of current room state. 
([\#13151](https://github.com/matrix-org/synapse/issues/13151)) +- Add the ability to set the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment when using `complement.sh`. ([\#13152](https://github.com/matrix-org/synapse/issues/13152)) +- Improve and fix type hints. ([\#13159](https://github.com/matrix-org/synapse/issues/13159)) +- Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167)) +- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195)) +- Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209)) +- Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210)) +- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211)) +- Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228)) + + Synapse 1.62.0 (2022-07-05) =========================== diff --git a/changelog.d/13028.misc b/changelog.d/13028.misc deleted file mode 100644 index 4e5f3d8f91..0000000000 --- a/changelog.d/13028.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `tests.utils`. diff --git a/changelog.d/13029.doc b/changelog.d/13029.doc deleted file mode 100644 index d398f0fdbe..0000000000 --- a/changelog.d/13029.doc +++ /dev/null @@ -1 +0,0 @@ -Add an explanation of the `--report-stats` argument to the docs. diff --git a/changelog.d/13031.feature b/changelog.d/13031.feature deleted file mode 100644 index fee8e9d1ff..0000000000 --- a/changelog.d/13031.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type. diff --git a/changelog.d/13032.doc b/changelog.d/13032.doc deleted file mode 100644 index 54d45ecd0d..0000000000 --- a/changelog.d/13032.doc +++ /dev/null @@ -1 +0,0 @@ -Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. diff --git a/changelog.d/13044.misc b/changelog.d/13044.misc deleted file mode 100644 index f9a0669dd3..0000000000 --- a/changelog.d/13044.misc +++ /dev/null @@ -1 +0,0 @@ -Support temporary experimental return values for spam checker module callbacks. \ No newline at end of file diff --git a/changelog.d/13077.doc b/changelog.d/13077.doc deleted file mode 100644 index 502f2d059e..0000000000 --- a/changelog.d/13077.doc +++ /dev/null @@ -1,3 +0,0 @@ -Clean up references to sample configuration and redirect users to the configuration manual instead. - - diff --git a/changelog.d/13078.misc b/changelog.d/13078.misc deleted file mode 100644 index 3835e97ad9..0000000000 --- a/changelog.d/13078.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory consumption when processing incoming events in large rooms. diff --git a/changelog.d/13079.misc b/changelog.d/13079.misc deleted file mode 100644 index 0133097c83..0000000000 --- a/changelog.d/13079.misc +++ /dev/null @@ -1 +0,0 @@ -Enable Complement testing in the 'Twisted Trunk' CI runs. 
\ No newline at end of file diff --git a/changelog.d/13086.doc b/changelog.d/13086.doc deleted file mode 100644 index a3960ca325..0000000000 --- a/changelog.d/13086.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation for anonymised homeserver statistics collection. \ No newline at end of file diff --git a/changelog.d/13100.misc b/changelog.d/13100.misc deleted file mode 100644 index 28f2fe0349..0000000000 --- a/changelog.d/13100.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: Handle race between persisting an event and un-partial stating a room. diff --git a/changelog.d/13103.misc b/changelog.d/13103.misc deleted file mode 100644 index 4de5f9e905..0000000000 --- a/changelog.d/13103.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to `synapse.logging`. diff --git a/changelog.d/13113.misc b/changelog.d/13113.misc deleted file mode 100644 index 7b1a50eec0..0000000000 --- a/changelog.d/13113.misc +++ /dev/null @@ -1 +0,0 @@ -Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. \ No newline at end of file diff --git a/changelog.d/13116.doc b/changelog.d/13116.doc deleted file mode 100644 index f99be50f44..0000000000 --- a/changelog.d/13116.doc +++ /dev/null @@ -1 +0,0 @@ -Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. diff --git a/changelog.d/13119.misc b/changelog.d/13119.misc deleted file mode 100644 index 3bb51962e7..0000000000 --- a/changelog.d/13119.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/changelog.d/13125.feature b/changelog.d/13125.feature deleted file mode 100644 index 9b0f609541..0000000000 --- a/changelog.d/13125.feature +++ /dev/null @@ -1 +0,0 @@ -Add a rate limit for local users sending invites. \ No newline at end of file diff --git a/changelog.d/13127.misc b/changelog.d/13127.misc deleted file mode 100644 index 1414811e0a..0000000000 --- a/changelog.d/13127.misc +++ /dev/null @@ -1 +0,0 @@ -Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. \ No newline at end of file diff --git a/changelog.d/13129.misc b/changelog.d/13129.misc deleted file mode 100644 index 4c2dbb7057..0000000000 --- a/changelog.d/13129.misc +++ /dev/null @@ -1 +0,0 @@ -Only one-line SQL statements for logging and tracing. diff --git a/changelog.d/13131.bugfix b/changelog.d/13131.bugfix deleted file mode 100644 index 06602f03fe..0000000000 --- a/changelog.d/13131.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix application service not being able to join remote federated room without a profile set. diff --git a/changelog.d/13132.doc b/changelog.d/13132.doc deleted file mode 100644 index c577069294..0000000000 --- a/changelog.d/13132.doc +++ /dev/null @@ -1 +0,0 @@ -Document how the Synapse team does reviews. diff --git a/changelog.d/13134.misc b/changelog.d/13134.misc deleted file mode 100644 index e3e16056d1..0000000000 --- a/changelog.d/13134.misc +++ /dev/null @@ -1 +0,0 @@ -Apply ratelimiting earlier in processing of /send request. \ No newline at end of file diff --git a/changelog.d/13135.misc b/changelog.d/13135.misc deleted file mode 100644 index f096dd8749..0000000000 --- a/changelog.d/13135.misc +++ /dev/null @@ -1 +0,0 @@ -Enforce type annotations for `tests.test_server`. 
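The change recorded in `changelog.d/13129.misc` above ("Only one-line SQL statements for logging and tracing") concerns only how statements are rendered in logs and traces, not what is sent to the database. A minimal sketch of the kind of helper it implies — the name and exact placement are assumptions, not the actual Synapse code:

```python
def one_line_sql(sql: str) -> str:
    """Collapse a multi-line SQL statement onto a single line for log and
    trace output, leaving the statement actually sent to the database
    untouched."""
    return " ".join(line.strip() for line in sql.splitlines() if line.strip())


# Example: logger.debug("[SQL] %s", one_line_sql("SELECT *\n  FROM events"))
# logs "SELECT * FROM events".
```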
diff --git a/changelog.d/13136.misc b/changelog.d/13136.misc deleted file mode 100644 index 6cf451d8cf..0000000000 --- a/changelog.d/13136.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `tests.server`. diff --git a/changelog.d/13139.doc b/changelog.d/13139.doc deleted file mode 100644 index f5d99d461a..0000000000 --- a/changelog.d/13139.doc +++ /dev/null @@ -1 +0,0 @@ -Add a link to the configuration manual from the homeserver sample config documentation. diff --git a/changelog.d/13143.misc b/changelog.d/13143.misc deleted file mode 100644 index 1cb77c02d7..0000000000 --- a/changelog.d/13143.misc +++ /dev/null @@ -1 +0,0 @@ -Add support to `complement.sh` for skipping the docker build. diff --git a/changelog.d/13144.misc b/changelog.d/13144.misc deleted file mode 100644 index 34762e2fcd..0000000000 --- a/changelog.d/13144.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: skip waiting for full state when processing incoming events over federation. diff --git a/changelog.d/13145.misc b/changelog.d/13145.misc deleted file mode 100644 index d5e2dba866..0000000000 --- a/changelog.d/13145.misc +++ /dev/null @@ -1 +0,0 @@ -Improve exception handling when processing events received over federation. diff --git a/changelog.d/13148.feature b/changelog.d/13148.feature deleted file mode 100644 index d1104b04b0..0000000000 --- a/changelog.d/13148.feature +++ /dev/null @@ -1 +0,0 @@ -Improve validation logic in Synapse's REST endpoints. diff --git a/changelog.d/13151.misc b/changelog.d/13151.misc deleted file mode 100644 index cfe3eed3a1..0000000000 --- a/changelog.d/13151.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: fix race in recalculation of current room state. diff --git a/changelog.d/13152.misc b/changelog.d/13152.misc deleted file mode 100644 index 0c919ab700..0000000000 --- a/changelog.d/13152.misc +++ /dev/null @@ -1 +0,0 @@ -Add the ability to set the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment when using `complement.sh`. \ No newline at end of file diff --git a/changelog.d/13153.misc b/changelog.d/13153.misc deleted file mode 100644 index 3bb51962e7..0000000000 --- a/changelog.d/13153.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/changelog.d/13157.misc b/changelog.d/13157.misc deleted file mode 100644 index 0133097c83..0000000000 --- a/changelog.d/13157.misc +++ /dev/null @@ -1 +0,0 @@ -Enable Complement testing in the 'Twisted Trunk' CI runs. \ No newline at end of file diff --git a/changelog.d/13158.misc b/changelog.d/13158.misc deleted file mode 100644 index 1cb77c02d7..0000000000 --- a/changelog.d/13158.misc +++ /dev/null @@ -1 +0,0 @@ -Add support to `complement.sh` for skipping the docker build. diff --git a/changelog.d/13159.misc b/changelog.d/13159.misc deleted file mode 100644 index bb5554ebe0..0000000000 --- a/changelog.d/13159.misc +++ /dev/null @@ -1 +0,0 @@ -Improve and fix type hints. \ No newline at end of file diff --git a/changelog.d/13166.doc b/changelog.d/13166.doc deleted file mode 100644 index 2d92e341ed..0000000000 --- a/changelog.d/13166.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing links to config options. diff --git a/changelog.d/13167.misc b/changelog.d/13167.misc deleted file mode 100644 index a7c7a688de..0000000000 --- a/changelog.d/13167.misc +++ /dev/null @@ -1 +0,0 @@ -Update config used by Complement to allow device name lookup over federation. 
\ No newline at end of file diff --git a/changelog.d/13174.bugfix b/changelog.d/13174.bugfix deleted file mode 100644 index b17935b93f..0000000000 --- a/changelog.d/13174.bugfix +++ /dev/null @@ -1 +0,0 @@ -Make use of the more robust `get_current_state` in `_get_state_map_for_room` to avoid breakages. diff --git a/changelog.d/13194.bugfix b/changelog.d/13194.bugfix deleted file mode 100644 index 2c2e8bb21b..0000000000 --- a/changelog.d/13194.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0. diff --git a/changelog.d/13195.misc b/changelog.d/13195.misc deleted file mode 100644 index 5506f767b3..0000000000 --- a/changelog.d/13195.misc +++ /dev/null @@ -1 +0,0 @@ -Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. \ No newline at end of file diff --git a/changelog.d/13197.bugfix b/changelog.d/13197.bugfix deleted file mode 100644 index 8417241523..0000000000 --- a/changelog.d/13197.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. diff --git a/changelog.d/13200.removal b/changelog.d/13200.removal deleted file mode 100644 index 755f5eb192..0000000000 --- a/changelog.d/13200.removal +++ /dev/null @@ -1 +0,0 @@ -Remove obsolete and for 8 years unused `RoomEventsStoreTestCase`. Contributed by @arkamar. diff --git a/changelog.d/13207.docker b/changelog.d/13207.docker deleted file mode 100644 index 63ba5c8031..0000000000 --- a/changelog.d/13207.docker +++ /dev/null @@ -1 +0,0 @@ -Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1. diff --git a/changelog.d/13209.misc b/changelog.d/13209.misc deleted file mode 100644 index cb0b8b4e63..0000000000 --- a/changelog.d/13209.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13210.misc b/changelog.d/13210.misc deleted file mode 100644 index 407791b8e5..0000000000 --- a/changelog.d/13210.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13211.misc b/changelog.d/13211.misc deleted file mode 100644 index 4d2a6dec65..0000000000 --- a/changelog.d/13211.misc +++ /dev/null @@ -1 +0,0 @@ -More aggressively rotate push actions. diff --git a/changelog.d/13212.doc b/changelog.d/13212.doc deleted file mode 100644 index e6b65d826f..0000000000 --- a/changelog.d/13212.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation for the existing `databases` option in the homeserver configuration manual. diff --git a/changelog.d/13222.misc b/changelog.d/13222.misc deleted file mode 100644 index 3835e97ad9..0000000000 --- a/changelog.d/13222.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory consumption when processing incoming events in large rooms. diff --git a/changelog.d/13223.bugfix b/changelog.d/13223.bugfix deleted file mode 100644 index 6ee3aed910..0000000000 --- a/changelog.d/13223.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where notification counts would get stuck after a highlighted message. Broke in v1.62.0. 
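The `auto_vacuum` pre-flight check described in `changelog.d/13195.misc` above exists because an automatic `VACUUM` between runs of `synapse_port_db` could renumber SQLite rowids mid-port. A rough sketch of such a check, assuming a plain `sqlite3` connection (the real check lives in the port script and is not shown in this patch):

```python
import sqlite3


def assert_auto_vacuum_disabled(db_path: str) -> None:
    """Refuse to proceed if auto_vacuum is enabled, since an automatic
    VACUUM between runs of the port script could renumber rowids."""
    conn = sqlite3.connect(db_path)
    try:
        (mode,) = conn.execute("PRAGMA auto_vacuum").fetchone()
    finally:
        conn.close()
    if mode != 0:  # 0 = NONE, 1 = FULL, 2 = INCREMENTAL
        raise RuntimeError(
            "auto_vacuum is enabled; disable it (PRAGMA auto_vacuum = NONE; "
            "VACUUM;) before running the port script"
        )
```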
diff --git a/changelog.d/13226.bugfix b/changelog.d/13226.bugfix deleted file mode 100644 index df96d41f37..0000000000 --- a/changelog.d/13226.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. diff --git a/changelog.d/13228.misc b/changelog.d/13228.misc deleted file mode 100644 index fec086557e..0000000000 --- a/changelog.d/13228.misc +++ /dev/null @@ -1 +0,0 @@ -Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. diff --git a/changelog.d/13235.bugfix b/changelog.d/13235.bugfix deleted file mode 100644 index 5c31fbc775..0000000000 --- a/changelog.d/13235.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix MSC3202-enabled appservices not receiving to-device messages, preventing messages from being decrypted. \ No newline at end of file diff --git a/changelog.d/13236.bugfix b/changelog.d/13236.bugfix deleted file mode 100644 index 7fddc4413d..0000000000 --- a/changelog.d/13236.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix appservices not receiving room-less EDUs, like presence, if enabled. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 520d8d20ae..9f4352586d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium + + * New Synapse release 1.63.0rc1. + + -- Synapse Packaging team Tue, 12 Jul 2022 11:26:02 +0100 + matrix-synapse-py3 (1.62.0) stable; urgency=medium * New Synapse release 1.62.0. diff --git a/pyproject.toml b/pyproject.toml index 4d1007fcb3..f77c02ca27 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.62.0" +version = "1.63.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From ac7aec0cd3d22e3013b167bc397ed67dec1a623b Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 12 Jul 2022 12:52:47 +0100 Subject: Reorder and tidy up changelog --- CHANGES.md | 54 +++++++++++++++++++++++++----------------------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ee2f90632f..c3d6a18a97 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,22 +4,22 @@ Synapse 1.63.0rc1 (2022-07-12) Features -------- -- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) - Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125)) +- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) - Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148)) Bugfixes -------- -- Fix application service not being able to join remote federated room without a profile set. ([\#13131](https://github.com/matrix-org/synapse/issues/13131)) -- Make use of the more robust `get_current_state` in `_get_state_map_for_room` to avoid breakages. ([\#13174](https://github.com/matrix-org/synapse/issues/13174)) -- Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0. 
([\#13194](https://github.com/matrix-org/synapse/issues/13194)) -- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197)) -- Fix bug where notification counts would get stuck after a highlighted message. Broke in v1.62.0. ([\#13223](https://github.com/matrix-org/synapse/issues/13223)) +- Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131)) +- Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174)) - Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226)) -- Fix MSC3202-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235)) -- Fix appservices not receiving room-less EDUs, like presence, if enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236)) +- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, if enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236)) +- Fix a bug introduced in 1.62.0 where rows were not deleted from `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194)) +- Fix a bug introduced in 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223)) +- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197)) +- Fix [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202)-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235)) Updates to the Docker image @@ -33,13 +33,12 @@ Improved Documentation - Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029)) - Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032)) -- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077)) -- Add documentation for anonymised homeserver statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086)) -- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116)) -- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132)) -- Add a link to the configuration manual from the homeserver sample config documentation. 
([\#13139](https://github.com/matrix-org/synapse/issues/13139)) - Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166)) +- Add documentation for anonymised homeserver statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086)) - Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212)) +- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139)) +- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132)) +- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116)) Deprecations and Removals @@ -51,27 +50,24 @@ Deprecations and Removals Internal Changes ---------------- -- Add type annotations to `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028)) +- Add type annotations to `synapse.logging`, `tests.server` and `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028), [\#13103](https://github.com/matrix-org/synapse/issues/13103), [\#13159](https://github.com/matrix-org/synapse/issues/13159), [\#13136](https://github.com/matrix-org/synapse/issues/13136)) +- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135)) - Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044)) -- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222)) +- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158)) +- Add support to `complement.sh` for setting the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable. ([\#13152](https://github.com/matrix-org/synapse/issues/13152)) - Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157)) -- Faster room joins: Handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100)) -- Add missing type hints to `synapse.logging`. ([\#13103](https://github.com/matrix-org/synapse/issues/13103)) -- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113)) -- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153)) - Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127)) -- Only one-line SQL statements for logging and tracing. ([\#13129](https://github.com/matrix-org/synapse/issues/13129)) -- Apply ratelimiting earlier in processing of /send request. 
([\#13134](https://github.com/matrix-org/synapse/issues/13134)) -- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135)) -- Add type annotations to `tests.server`. ([\#13136](https://github.com/matrix-org/synapse/issues/13136)) -- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158)) -- Faster joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144)) -- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145)) -- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151)) -- Add the ability to set the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment when using `complement.sh`. ([\#13152](https://github.com/matrix-org/synapse/issues/13152)) -- Improve and fix type hints. ([\#13159](https://github.com/matrix-org/synapse/issues/13159)) - Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167)) +- Faster room joins: handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100)) +- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151)) +- Faster room joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144)) +- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113)) +- Avoid stripping line breaks from SQL sent to the database. ([\#13129](https://github.com/matrix-org/synapse/issues/13129)) +- Apply ratelimiting earlier in processing of `/send` requests. ([\#13134](https://github.com/matrix-org/synapse/issues/13134)) +- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145)) - Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195)) +- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153)) +- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222)) - Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209)) - Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210)) - More aggressively rotate push actions. 
([\#13211](https://github.com/matrix-org/synapse/issues/13211)) -- cgit 1.4.1 From f14c63213492f512f451b295331a18dbb28685c6 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 12 Jul 2022 13:01:42 +0100 Subject: Update changelog once more --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index c3d6a18a97..4071f973de 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Features -------- - Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125)) -- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) +- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) - Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148)) @@ -15,7 +15,7 @@ Bugfixes - Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131)) - Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174)) - Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226)) -- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, if enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236)) +- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, when both [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) and [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) are enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236)) - Fix a bug introduced in 1.62.0 where rows were not deleted from `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194)) - Fix a bug introduced in 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223)) - Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197)) -- cgit 1.4.1 From 2d82cdafd23b5bcb597e776537e23c367e18d4ac Mon Sep 17 00:00:00 2001 From: andrew do Date: Tue, 12 Jul 2022 07:30:53 -0700 Subject: expose whether a room is a space in the Admin API (#13208) --- changelog.d/13208.feature | 1 + docs/admin_api/rooms.md | 29 +++++++++++++++++++++-------- synapse/storage/databases/main/room.py | 6 ++++-- tests/rest/admin/test_room.py | 13 ++++++++++--- 4 files changed, 36 insertions(+), 13 deletions(-) create mode 100644 changelog.d/13208.feature diff --git a/changelog.d/13208.feature b/changelog.d/13208.feature new file mode 100644 index 0000000000..b0c5f090ee --- /dev/null +++ b/changelog.d/13208.feature @@ -0,0 +1 @@ +Add a `room_type` field in the responses for the list room and room details admin API. 
Contributed by @andrewdoh. \ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index d4873f9490..9aa489e4a3 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -59,6 +59,7 @@ The following fields are possible in the JSON response body: - `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"]. - `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"]. - `state_events` - Total number of state_events of a room. Complexity of the room. + - `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. If the room does not define a type, the value will be `null`. * `offset` - The current pagination offset in rooms. This parameter should be used instead of `next_token` for room offset as `next_token` is not intended to be parsed. @@ -101,7 +102,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": "m.space" }, ... (8 hidden items) ... { @@ -118,7 +120,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8345 + "state_events": 8345, + "room_type": null } ], "offset": 0, @@ -151,7 +154,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8 + "state_events": 8, + "room_type": null } ], "offset": 0, @@ -184,7 +188,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": null }, ... (98 hidden items) ... { @@ -201,7 +206,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8345 + "state_events": 8345, + "room_type": "m.space" } ], "offset": 0, @@ -238,7 +244,9 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": "m.space" + }, ... (48 hidden items) ... { @@ -255,7 +263,9 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8345 + "state_events": 8345, + "room_type": null + } ], "offset": 100, @@ -290,6 +300,8 @@ The following fields are possible in the JSON response body: * `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"]. * `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"]. * `state_events` - Total number of state_events of a room. Complexity of the room. +* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. + If the room does not define a type, the value will be `null`. 
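A short usage sketch for the fields documented above — the endpoint is the Room Details admin API shown next; the base URL and access token are placeholders, and the `requests` dependency is an assumption of this example, not of Synapse:

```python
import requests

resp = requests.get(
    # Room IDs must be URL-encoded ("!" -> %21, ":" -> %3A).
    "https://synapse.example.com/_synapse/admin/v1/rooms/%21roomid%3Aexample.com",
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
room = resp.json()
# room_type is "m.space" for spaces and None (JSON null) for ordinary rooms.
if room.get("room_type") == "m.space":
    print(f"{room['name']} is a space")
```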
The API is: @@ -317,7 +329,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": "m.space" } ``` diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 13d6a1d5c0..d6d485507b 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -175,7 +175,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): rooms.creator, state.encryption, state.is_federatable AS federatable, rooms.is_public AS public, state.join_rules, state.guest_access, state.history_visibility, curr.current_state_events AS state_events, - state.avatar, state.topic + state.avatar, state.topic, state.room_type FROM rooms LEFT JOIN room_stats_state state USING (room_id) LEFT JOIN room_stats_current curr USING (room_id) @@ -596,7 +596,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): SELECT state.room_id, state.name, state.canonical_alias, curr.joined_members, curr.local_users_in_room, rooms.room_version, rooms.creator, state.encryption, state.is_federatable, rooms.is_public, state.join_rules, - state.guest_access, state.history_visibility, curr.current_state_events + state.guest_access, state.history_visibility, curr.current_state_events, + state.room_type FROM room_stats_state state INNER JOIN room_stats_current curr USING (room_id) INNER JOIN rooms USING (room_id) @@ -646,6 +647,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): "guest_access": room[11], "history_visibility": room[12], "state_events": room[13], + "room_type": room[14], } ) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 230dc76f72..2526136ff8 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -21,7 +21,7 @@ from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, Membership, RoomTypes from synapse.api.errors import Codes from synapse.handlers.pagination import PaginationHandler from synapse.rest.client import directory, events, login, room @@ -1130,6 +1130,8 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertIn("guest_access", r) self.assertIn("history_visibility", r) self.assertIn("state_events", r) + self.assertIn("room_type", r) + self.assertIsNone(r["room_type"]) # Check that the correct number of total rooms was returned self.assertEqual(channel.json_body["total_rooms"], total_rooms) @@ -1229,7 +1231,11 @@ class RoomTestCase(unittest.HomeserverTestCase): def test_correct_room_attributes(self) -> None: """Test the correct attributes for a room are returned""" # Create a test room - room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + room_id = self.helper.create_room_as( + self.admin_user, + tok=self.admin_user_tok, + extra_content={"creation_content": {"type": RoomTypes.SPACE}}, + ) test_alias = "#test:test" test_room_name = "something" @@ -1306,6 +1312,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertEqual(room_id, r["room_id"]) self.assertEqual(test_room_name, r["name"]) self.assertEqual(test_alias, r["canonical_alias"]) + self.assertEqual(RoomTypes.SPACE, r["room_type"]) def test_room_list_sort_order(self) -> None: """Test room list sort ordering. 
alphabetical name versus number of members, @@ -1630,7 +1637,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertIn("guest_access", channel.json_body) self.assertIn("history_visibility", channel.json_body) self.assertIn("state_events", channel.json_body) - + self.assertIn("room_type", channel.json_body) self.assertEqual(room_id_1, channel.json_body["room_id"]) def test_single_room_devices(self) -> None: -- cgit 1.4.1 From b19060a29b4f73897847db2aba5d03ec819086e0 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 12 Jul 2022 12:06:29 -0500 Subject: Make the AS login method call `Auth.get_user_by_req` for checking the AS token. (#13094) This gets rid of another usage of get_appservice_by_req, with all the benefits, including correctly tracking the appservice IP and setting the tracing attributes correctly. Signed-off-by: Quentin Gliech --- changelog.d/13094.misc | 1 + synapse/rest/client/login.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13094.misc diff --git a/changelog.d/13094.misc b/changelog.d/13094.misc new file mode 100644 index 0000000000..f1e55ae476 --- /dev/null +++ b/changelog.d/13094.misc @@ -0,0 +1 @@ +Make the AS login method call `Auth.get_user_by_req` for checking the AS token. diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index dd75e40f34..0437c87d8d 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -28,7 +28,7 @@ from typing import ( from typing_extensions import TypedDict -from synapse.api.errors import Codes, LoginError, SynapseError +from synapse.api.errors import Codes, InvalidClientTokenError, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.api.urls import CLIENT_API_PREFIX from synapse.appservice import ApplicationService @@ -172,7 +172,13 @@ class LoginRestServlet(RestServlet): try: if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE: - appservice = self.auth.get_appservice_by_req(request) + requester = await self.auth.get_user_by_req(request) + appservice = requester.app_service + + if appservice is None: + raise InvalidClientTokenError( + "This login method is only valid for application services" + ) if appservice.is_rate_limited(): await self._address_ratelimiter.ratelimit( -- cgit 1.4.1 From 6f30eb5b8e3d88d1b44cc7f9d7e548b30081d7e6 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 12 Jul 2022 10:48:47 -0700 Subject: Add info about configuration in the url preview docs (#13233) Cross-link doc pages for easier navigation. --- changelog.d/13233.doc | 2 ++ docs/development/url_previews.md | 1 + 2 files changed, 3 insertions(+) create mode 100644 changelog.d/13233.doc diff --git a/changelog.d/13233.doc b/changelog.d/13233.doc new file mode 100644 index 0000000000..b6babd7f15 --- /dev/null +++ b/changelog.d/13233.doc @@ -0,0 +1,2 @@ +Add a link to configuration instructions in the URL preview documentation. + diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md index 154b9a5e12..25f189683d 100644 --- a/docs/development/url_previews.md +++ b/docs/development/url_previews.md @@ -1,5 +1,6 @@ URL Previews ============ +For information on how to enable URL previews in synapse, please see the [config manual](../usage/configuration/config_documentation.md#url_preview_enabled). 
The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API for URLs which outputs [Open Graph](https://ogp.me/) responses (with some Matrix -- cgit 1.4.1 From 3f178332d68cb723150d0e392ed92780e4e5a610 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 12 Jul 2022 18:57:38 +0100 Subject: Log the stack when waiting for an entire room to be un-partial stated (#13257) The stack is already logged when waiting for an event to be un-partial stated. Log the stack for rooms as well, to aid in debugging. --- changelog.d/13257.misc | 1 + synapse/storage/util/partial_state_events_tracker.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/13257.misc diff --git a/changelog.d/13257.misc b/changelog.d/13257.misc new file mode 100644 index 0000000000..5fc1388520 --- /dev/null +++ b/changelog.d/13257.misc @@ -0,0 +1 @@ +Log the stack when waiting for an entire room to be un-partial stated. diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py index 211437cfaa..466e5137f2 100644 --- a/synapse/storage/util/partial_state_events_tracker.py +++ b/synapse/storage/util/partial_state_events_tracker.py @@ -166,6 +166,7 @@ class PartialCurrentStateTracker: logger.info( "Awaiting un-partial-stating of room %s", room_id, + stack_info=True, ) await make_deferred_yieldable(d) -- cgit 1.4.1 From fa71bb18b527d1a3e2629b48640ea67fff2f8c59 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 12 Jul 2022 19:18:53 +0100 Subject: Drop support for delegating email validation (#13192) * Drop support for delegating email validation Delegating email validation to an IS is insecure (since it allows the owner of the IS to do a password reset on your HS), and has long been deprecated. It will now cause a config error at startup. * Update unit test which checks for email verification Give it an `email` config instead of a threepid delegate * Remove unused method `requestEmailToken` * Simplify config handling for email verification Rather than an enum and a boolean, all we need here is a single bool, which says whether we are or are not doing email verification. * update docs * changelog * upgrade.md: fix typo * update version number this will be in 1.64, not 1.63 * update version number this one too --- CHANGES.md | 7 +- changelog.d/13192.removal | 1 + docs/upgrade.md | 15 ++++ docs/usage/configuration/config_documentation.md | 28 +++--- synapse/app/homeserver.py | 3 +- synapse/config/emailconfig.py | 45 ++-------- synapse/config/registration.py | 11 ++- synapse/handlers/identity.py | 56 +----------- synapse/handlers/ui_auth/checkers.py | 21 +---- synapse/rest/client/account.py | 107 +++++++---------------- synapse/rest/client/register.py | 59 +++++-------- synapse/rest/synapse/client/password_reset.py | 8 +- tests/rest/client/test_register.py | 2 +- 13 files changed, 110 insertions(+), 253 deletions(-) create mode 100644 changelog.d/13192.removal diff --git a/CHANGES.md b/CHANGES.md index 4071f973de..bcf9fae4a5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,8 @@ +Synapse vNext +============= + +As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.63/docs/upgrade.md#upgrading-to-v1630). 
+ Synapse 1.63.0rc1 (2022-07-12) ============================== @@ -73,7 +78,6 @@ Internal Changes - More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211)) - Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228)) - Synapse 1.62.0 (2022-07-05) =========================== @@ -81,7 +85,6 @@ No significant changes since 1.62.0rc3. Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse. - Synapse 1.62.0rc3 (2022-07-04) ============================== diff --git a/changelog.d/13192.removal b/changelog.d/13192.removal new file mode 100644 index 0000000000..a7dffd1c48 --- /dev/null +++ b/changelog.d/13192.removal @@ -0,0 +1 @@ +Drop support for delegating email verification to an external server. diff --git a/docs/upgrade.md b/docs/upgrade.md index 312f0b87fe..3aaeb499ce 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,21 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.64.0 + +## Delegation of email validation no longer supported + +As of this version, Synapse no longer allows the tasks of verifying email address +ownership, and password reset confirmation, to be delegated to an identity server. + +To continue to allow users to add email addresses to their homeserver accounts, +and perform password resets, make sure that Synapse is configured with a +working email server in the `email` configuration section (including, at a +minimum, a `notif_from` setting.) + +Specifying an `email` setting under `account_threepid_delegates` will now cause +an error at startup. + # Upgrading to v1.62.0 ## New signatures for spam checker callbacks diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 5deabb53d7..5fe502e33a 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2168,30 +2168,26 @@ default_identity_server: https://matrix.org --- ### `account_threepid_delegates` -Handle threepid (email/phone etc) registration and password resets through a set of -*trusted* identity servers. Note that this allows the configured identity server to -reset passwords for accounts! +Delegate verification of phone numbers to an identity server. -Be aware that if `email` is not set, and SMTP options have not been -configured in the email config block, registration and user password resets via -email will be globally disabled. +When a user wishes to add a phone number to their account, we need to verify that they +actually own that phone number, which requires sending them a text message (SMS). +Currently Synapse does not support sending those texts itself and instead delegates the +task to an identity server. The base URI for the identity server to be used is +specified by the `account_threepid_delegates.msisdn` option. -Additionally, if `msisdn` is not set, registration and password resets via msisdn -will be disabled regardless, and users will not be able to associate an msisdn -identifier to their account. This is due to Synapse currently not supporting -any method of sending SMS messages on its own. 
+If this is left unspecified, Synapse will not allow users to add phone numbers to +their account. -To enable using an identity server for operations regarding a particular third-party -identifier type, set the value to the URL of that identity server as shown in the -examples below. +(Servers handling the these requests must answer the `/requestToken` endpoints defined +by the Matrix Identity Service API +[specification](https://matrix.org/docs/spec/identity_service/latest).) -Servers handling the these requests must answer the `/requestToken` endpoints defined -by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest). +*Updated in Synapse 1.64.0*: No longer accepts an `email` option. Example configuration: ```yaml account_threepid_delegates: - email: https://example.com # Delegate email sending to example.com msisdn: http://localhost:8090 # Delegate SMS sending to this local process ``` --- diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 745e704141..6bafa7d3f3 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -44,7 +44,6 @@ from synapse.app._base import ( register_start, ) from synapse.config._base import ConfigError, format_config_error -from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig from synapse.federation.transport.server import TransportLayerServer @@ -202,7 +201,7 @@ class SynapseHomeServer(HomeServer): } ) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: from synapse.rest.synapse.client.password_reset import ( PasswordResetSubmitTokenResource, ) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 6e11fbdb9a..3ead80d985 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -18,7 +18,6 @@ import email.utils import logging import os -from enum import Enum from typing import Any import attr @@ -131,41 +130,22 @@ class EmailConfig(Config): self.email_enable_notifs = email_config.get("enable_notifs", False) - self.threepid_behaviour_email = ( - # Have Synapse handle the email sending if account_threepid_delegates.email - # is not defined - # msisdn is currently always remote while Synapse does not support any method of - # sending SMS messages - ThreepidBehaviour.REMOTE - if self.root.registration.account_threepid_delegate_email - else ThreepidBehaviour.LOCAL - ) - if config.get("trust_identity_server_for_password_resets"): raise ConfigError( 'The config option "trust_identity_server_for_password_resets" ' - 'has been replaced by "account_threepid_delegate". ' - "Please consult the configuration manual at docs/usage/configuration/config_documentation.md for " - "details and update your config file." + "is no longer supported. Please remove it from the config file." ) - self.local_threepid_handling_disabled_due_to_email_config = False - if ( - self.threepid_behaviour_email == ThreepidBehaviour.LOCAL - and email_config == {} - ): - # We cannot warn the user this has happened here - # Instead do so when a user attempts to reset their password - self.local_threepid_handling_disabled_due_to_email_config = True - - self.threepid_behaviour_email = ThreepidBehaviour.OFF + # If we have email config settings, assume that we can verify ownership of + # email addresses. 
+ self.can_verify_email = email_config != {} # Get lifetime of a validation token in milliseconds self.email_validation_token_lifetime = self.parse_duration( email_config.get("validation_token_lifetime", "1h") ) - if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.can_verify_email: missing = [] if not self.email_notif_from: missing.append("email.notif_from") @@ -356,18 +336,3 @@ class EmailConfig(Config): "Config option email.invite_client_location must be a http or https URL", path=("email", "invite_client_location"), ) - - -class ThreepidBehaviour(Enum): - """ - Enum to define the behaviour of Synapse with regards to when it contacts an identity - server for 3pid registration and password resets - - REMOTE = use an external server to send tokens - LOCAL = send tokens ourselves - OFF = disable registration via 3pid and password resets - """ - - REMOTE = "remote" - LOCAL = "local" - OFF = "off" diff --git a/synapse/config/registration.py b/synapse/config/registration.py index fcf99be092..685a0423c5 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -20,6 +20,13 @@ from synapse.config._base import Config, ConfigError from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool +NO_EMAIL_DELEGATE_ERROR = """\ +Delegation of email verification to an identity server is no longer supported. To +continue to allow users to add email addresses to their accounts, and use them for +password resets, configure Synapse with an SMTP server via the `email` setting, and +remove `account_threepid_delegates.email`. +""" + class RegistrationConfig(Config): section = "registration" @@ -51,7 +58,9 @@ class RegistrationConfig(Config): self.bcrypt_rounds = config.get("bcrypt_rounds", 12) account_threepid_delegates = config.get("account_threepid_delegates") or {} - self.account_threepid_delegate_email = account_threepid_delegates.get("email") + if "email" in account_threepid_delegates: + raise ConfigError(NO_EMAIL_DELEGATE_ERROR) + # self.account_threepid_delegate_email = account_threepid_delegates.get("email") self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9bca2bc4b2..c70fdcc85e 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -26,7 +26,6 @@ from synapse.api.errors import ( SynapseError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.config.emailconfig import ThreepidBehaviour from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest @@ -434,48 +433,6 @@ class IdentityHandler: return session_id - async def requestEmailToken( - self, - id_server: str, - email: str, - client_secret: str, - send_attempt: int, - next_link: Optional[str] = None, - ) -> JsonDict: - """ - Request an external server send an email on our behalf for the purposes of threepid - validation. 
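To make the new email-verification gating concrete: the enum-based `threepid_behaviour_email` collapses to a single boolean, and any `email` key under `account_threepid_delegates` becomes a startup error. A condensed sketch of that logic (a hypothetical standalone function, not the full `EmailConfig` class):

```python
from typing import Any, Dict


class ConfigError(Exception):
    """Stand-in for Synapse's own config error type."""


NO_EMAIL_DELEGATE_ERROR = (
    "Delegation of email verification to an identity server is no longer supported."
)


def can_verify_email(config: Dict[str, Any]) -> bool:
    """Return True iff this homeserver can verify email ownership itself."""
    if "email" in (config.get("account_threepid_delegates") or {}):
        # Delegating email verification is now a hard error at startup.
        raise ConfigError(NO_EMAIL_DELEGATE_ERROR)
    # Any email config settings at all are taken to mean we can verify
    # ownership of email addresses (the real code additionally insists on
    # required keys such as email.notif_from).
    return (config.get("email") or {}) != {}


assert can_verify_email({"email": {"notif_from": "Synapse <noreply@example.com>"}})
assert not can_verify_email({})
```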
- - Args: - id_server: The identity server to proxy to - email: The email to send the message to - client_secret: The unique client_secret sends by the user - send_attempt: Which attempt this is - next_link: A link to redirect the user to once they submit the token - - Returns: - The json response body from the server - """ - params = { - "email": email, - "client_secret": client_secret, - "send_attempt": send_attempt, - } - if next_link: - params["next_link"] = next_link - - try: - data = await self.http_client.post_json_get_json( - id_server + "/_matrix/identity/api/v1/validate/email/requestToken", - params, - ) - return data - except HttpResponseException as e: - logger.info("Proxied requestToken failed: %r", e) - raise e.to_synapse_error() - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - async def requestMsisdnToken( self, id_server: str, @@ -549,18 +506,7 @@ class IdentityHandler: validation_session = None # Try to validate as email - if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - # Remote emails will only be used if a valid identity server is provided. - assert ( - self.hs.config.registration.account_threepid_delegate_email is not None - ) - - # Ask our delegated email identity server - validation_session = await self.threepid_from_creds( - self.hs.config.registration.account_threepid_delegate_email, - threepid_creds, - ) - elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.hs.config.email.can_verify_email: # Get a validated session matching these details validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 05cebb5d4d..a744d68c64 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -19,7 +19,6 @@ from twisted.web.client import PartialDownloadError from synapse.api.constants import LoginType from synapse.api.errors import Codes, LoginError, SynapseError -from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder if TYPE_CHECKING: @@ -153,7 +152,7 @@ class _BaseThreepidAuthChecker: logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) - # msisdns are currently always ThreepidBehaviour.REMOTE + # msisdns are currently always verified via the IS if medium == "msisdn": if not self.hs.config.registration.account_threepid_delegate_msisdn: raise SynapseError( @@ -164,18 +163,7 @@ class _BaseThreepidAuthChecker: threepid_creds, ) elif medium == "email": - if ( - self.hs.config.email.threepid_behaviour_email - == ThreepidBehaviour.REMOTE - ): - assert self.hs.config.registration.account_threepid_delegate_email - threepid = await identity_handler.threepid_from_creds( - self.hs.config.registration.account_threepid_delegate_email, - threepid_creds, - ) - elif ( - self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL - ): + if self.hs.config.email.can_verify_email: threepid = None row = await self.store.get_threepid_validation_session( medium, @@ -227,10 +215,7 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec _BaseThreepidAuthChecker.__init__(self, hs) def is_enabled(self) -> bool: - return self.hs.config.email.threepid_behaviour_email in ( - ThreepidBehaviour.REMOTE, - ThreepidBehaviour.LOCAL, - ) + return self.hs.config.email.can_verify_email async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index bdc4a9c068..19c2da4244 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -28,7 +28,6 @@ from synapse.api.errors import ( SynapseError, ThreepidValidationError, ) -from synapse.config.emailconfig import ThreepidBehaviour from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.http.server import HttpServer, finish_request, respond_with_html from synapse.http.servlet import ( @@ -64,7 +63,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): self.config = hs.config self.identity_handler = hs.get_identity_handler() - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -73,11 +72,10 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "User password resets have been disabled due to lack of email config" - ) + if not self.config.email.can_verify_email: + logger.warning( + "User password resets have been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based password resets have been disabled on this server" ) @@ -129,35 +127,21 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.registration.account_threepid_delegate_email - - # Have the configured identity server handle the request - ret = await self.identity_handler.requestEmailToken( - self.hs.config.registration.account_threepid_delegate_email, - email, - client_secret, - send_attempt, - next_link, - ) - else: - # Send password reset emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_password_reset_mail, - 
next_link, - ) - - # Wrap the session id in a JSON object - ret = {"sid": sid} + # Send password reset emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_password_reset_mail, + next_link, + ) threepid_send_requests.labels(type="email", reason="password_reset").observe( send_attempt ) - return 200, ret + # Wrap the session id in a JSON object + return 200, {"sid": sid} class PasswordRestServlet(RestServlet): @@ -349,7 +333,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.store = self.hs.get_datastores().main - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -358,11 +342,10 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) + if not self.config.email.can_verify_email: + logger.warning( + "Adding emails have been disabled due to lack of an email config" + ) raise SynapseError( 400, "Adding an email to your account is disabled on this server" ) @@ -413,35 +396,20 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.registration.account_threepid_delegate_email - - # Have the configured identity server handle the request - ret = await self.identity_handler.requestEmailToken( - self.hs.config.registration.account_threepid_delegate_email, - email, - client_secret, - send_attempt, - next_link, - ) - else: - # Send threepid validation emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_add_threepid_mail, - next_link, - ) - - # Wrap the session id in a JSON object - ret = {"sid": sid} + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_add_threepid_mail, + next_link, + ) threepid_send_requests.labels(type="email", reason="add_threepid").observe( send_attempt ) - return 200, ret + # Wrap the session id in a JSON object + return 200, {"sid": sid} class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -534,25 +502,18 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self._failure_email_template = ( self.config.email.email_add_threepid_template_failure_html ) async def on_GET(self, request: Request) -> None: - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) - raise SynapseError( - 400, "Adding an email to your account is disabled on this server" + if not self.config.email.can_verify_email: + logger.warning( + "Adding emails have been disabled 
due to lack of an email config" ) - elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: raise SynapseError( - 400, - "This homeserver is not validating threepids. Use an identity server " - "instead.", + 400, "Adding an email to your account is disabled on this server" ) sid = parse_string(request, "sid", required=True) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index e8e51a9c66..a8402cdb3a 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -31,7 +31,6 @@ from synapse.api.errors import ( ) from synapse.api.ratelimiting import Ratelimiter from synapse.config import ConfigError -from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.server import is_threepid_reserved @@ -74,7 +73,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.config = hs.config - if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.hs.config.email.can_verify_email: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -83,13 +82,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if ( - self.hs.config.email.local_threepid_handling_disabled_due_to_email_config - ): - logger.warning( - "Email registration has been disabled due to lack of email config" - ) + if not self.hs.config.email.can_verify_email: + logger.warning( + "Email registration has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration has been disabled on this server" ) @@ -138,35 +134,21 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.registration.account_threepid_delegate_email - - # Have the configured identity server handle the request - ret = await self.identity_handler.requestEmailToken( - self.hs.config.registration.account_threepid_delegate_email, - email, - client_secret, - send_attempt, - next_link, - ) - else: - # Send registration emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_registration_mail, - next_link, - ) - - # Wrap the session id in a JSON object - ret = {"sid": sid} + # Send registration emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_registration_mail, + next_link, + ) threepid_send_requests.labels(type="email", reason="register").observe( send_attempt ) - return 200, ret + # Wrap the session id in a JSON object + return 200, {"sid": sid} class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -260,7 +242,7 @@ class RegistrationSubmitTokenServlet(RestServlet): self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self._failure_email_template = ( self.config.email.email_registration_template_failure_html ) @@ -270,11 +252,10 @@ class 
RegistrationSubmitTokenServlet(RestServlet): raise SynapseError( 400, "This medium is currently not supported for registration" ) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "User registration via email has been disabled due to lack of email config" - ) + if not self.config.email.can_verify_email: + logger.warning( + "User registration via email has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration is disabled on this server" ) diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index 6ac9dbc7c9..b9402cfb75 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -17,7 +17,6 @@ from typing import TYPE_CHECKING, Tuple from twisted.web.server import Request from synapse.api.errors import ThreepidValidationError -from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import DirectServeHtmlResource from synapse.http.servlet import parse_string from synapse.util.stringutils import assert_valid_client_secret @@ -46,9 +45,6 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): self.clock = hs.get_clock() self.store = hs.get_datastores().main - self._local_threepid_handling_disabled_due_to_email_config = ( - hs.config.email.local_threepid_handling_disabled_due_to_email_config - ) self._confirmation_email_template = ( hs.config.email.email_password_reset_template_confirmation_html ) @@ -59,8 +55,8 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): hs.config.email.email_password_reset_template_failure_html ) - # This resource should not be mounted if threepid behaviour is not LOCAL - assert hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL + # This resource should only be mounted if email validation is enabled + assert hs.config.email.can_verify_email async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: sid = parse_string(request, "sid", required=True) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index afb08b2736..cb27458746 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -592,9 +592,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "require_at_registration": True, }, "account_threepid_delegates": { - "email": "https://id_server", "msisdn": "https://id_server", }, + "email": {"notif_from": "Synapse "}, } ) def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: -- cgit 1.4.1 From 52a0c8f2f7fc5a6dad02d3b6bdae90f3e58842c9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 12 Jul 2022 19:46:32 +0100 Subject: Rename test case method to `add_hashes_and_signatures_from_other_server` (#13255) --- changelog.d/13255.misc | 1 + tests/federation/test_federation_client.py | 6 +++--- tests/federation/test_federation_server.py | 13 ++++--------- tests/handlers/test_federation.py | 2 +- tests/handlers/test_federation_event.py | 6 +++--- tests/test_visibility.py | 2 +- tests/unittest.py | 2 +- 7 files changed, 14 insertions(+), 18 deletions(-) create mode 100644 changelog.d/13255.misc diff --git a/changelog.d/13255.misc b/changelog.d/13255.misc new file mode 100644 index 0000000000..cba6b9ee0f --- /dev/null +++ b/changelog.d/13255.misc @@ -0,0 +1 @@ +Preparatory work for a per-room rate limiter on joins. 
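The test diffs below apply the rename mechanically. For readers unfamiliar with the helper, it is roughly the following wrapper, sketched here as a free function under the assumption of the attributes visible in `tests/unittest.py` (the real method lives on `FederatingHomeserverTestCase`):

```python
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.config.server import DEFAULT_ROOM_VERSION
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.types import JsonDict


def add_hashes_and_signatures_from_other_server(
    test_case,  # a FederatingHomeserverTestCase instance
    event_dict: JsonDict,
    room_version: RoomVersion = KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],
) -> JsonDict:
    # Hash and sign the event as if it originated from the test's fake
    # remote server, so individual tests need not pass the signing key.
    add_hashes_and_signatures(
        room_version,
        event_dict,
        signature_name=test_case.OTHER_SERVER_NAME,
        signing_key=test_case.OTHER_SERVER_SIGNATURE_KEY,
    )
    return event_dict
```

The new name makes clear at each call site that the event is signed as the *other* server, which is exactly what federation tests such as `SendJoinFederationTests` need.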
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index 268a48d7ba..d2bda07198 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -45,7 +45,7 @@ class FederationClientTest(FederatingHomeserverTestCase): # mock up some events to use in the response. # In real life, these would have things in `prev_events` and `auth_events`, but that's # a bit annoying to mock up, and the code under test doesn't care, so we don't bother. - create_event_dict = self.add_hashes_and_signatures( + create_event_dict = self.add_hashes_and_signatures_from_other_server( { "room_id": test_room_id, "type": "m.room.create", @@ -57,7 +57,7 @@ class FederationClientTest(FederatingHomeserverTestCase): "origin_server_ts": 500, } ) - member_event_dict = self.add_hashes_and_signatures( + member_event_dict = self.add_hashes_and_signatures_from_other_server( { "room_id": test_room_id, "type": "m.room.member", @@ -69,7 +69,7 @@ class FederationClientTest(FederatingHomeserverTestCase): "origin_server_ts": 600, } ) - pl_event_dict = self.add_hashes_and_signatures( + pl_event_dict = self.add_hashes_and_signatures_from_other_server( { "room_id": test_room_id, "type": "m.room.power_levels", diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 413b3c9426..fd15e680ed 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -20,7 +20,6 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.config.server import DEFAULT_ROOM_VERSION -from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import make_event_from_dict from synapse.federation.federation_server import server_matches_acl_event from synapse.rest import admin @@ -163,11 +162,9 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): join_result = self._make_join(joining_user) join_event_dict = join_result["event"] - add_hashes_and_signatures( - KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + self.add_hashes_and_signatures_from_other_server( join_event_dict, - signature_name=self.OTHER_SERVER_NAME, - signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], ) channel = self.make_signed_federation_request( "PUT", @@ -220,11 +217,9 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): join_result = self._make_join(joining_user) join_event_dict = join_result["event"] - add_hashes_and_signatures( - KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + self.add_hashes_and_signatures_from_other_server( join_event_dict, - signature_name=self.OTHER_SERVER_NAME, - signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], ) channel = self.make_signed_federation_request( "PUT", diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 9b9c11fab7..712933f9ca 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -256,7 +256,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): ] for _ in range(0, 8): event = make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "origin_server_ts": 1, "type": "m.room.message", diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 4b1a8f04db..51c8dd6498 100644 --- 
a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -104,7 +104,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): # mock up a load of state events which we are missing state_events = [ make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "type": "test_state_type", "state_key": f"state_{i}", @@ -131,7 +131,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): # Depending on the test, we either persist this upfront (as an outlier), # or let the server request it. prev_event = make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "type": "test_regular_type", "room_id": room_id, @@ -165,7 +165,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): # mock up a regular event to pass into _process_pulled_event pulled_event = make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "type": "test_regular_type", "room_id": room_id, diff --git a/tests/test_visibility.py b/tests/test_visibility.py index f338af6c36..c385b2f8d4 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -272,7 +272,7 @@ class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase): "state_key": "@user:test", "content": {"membership": "invite"}, } - self.add_hashes_and_signatures(invite_pdu) + self.add_hashes_and_signatures_from_other_server(invite_pdu) invite_event_id = make_event_from_dict(invite_pdu, RoomVersions.V9).event_id self.get_success( diff --git a/tests/unittest.py b/tests/unittest.py index c645dd3563..7b97a4bf6e 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -838,7 +838,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase): client_ip=client_ip, ) - def add_hashes_and_signatures( + def add_hashes_and_signatures_from_other_server( self, event_dict: JsonDict, room_version: RoomVersion = KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], -- cgit 1.4.1 From 7218a0ca1871c881070a25e33bb1198f51ba1e3a Mon Sep 17 00:00:00 2001 From: Jacek Kuśnierz Date: Tue, 12 Jul 2022 20:48:29 +0200 Subject: Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token` (#13239) Fixes #13201 Signed-off-by: Jacek Kusnierz jacek.kusnierz@tum.de --- changelog.d/13239.removal | 1 + synapse/handlers/identity.py | 30 ++++++------------------------ synapse/rest/client/account.py | 6 ++++-- 3 files changed, 11 insertions(+), 26 deletions(-) create mode 100644 changelog.d/13239.removal diff --git a/changelog.d/13239.removal b/changelog.d/13239.removal new file mode 100644 index 0000000000..8f6045176d --- /dev/null +++ b/changelog.d/13239.removal @@ -0,0 +1 @@ +Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. 
\ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index c70fdcc85e..164d891e90 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -162,8 +162,7 @@ class IdentityHandler: sid: str, mxid: str, id_server: str, - id_access_token: Optional[str] = None, - use_v2: bool = True, + id_access_token: str, ) -> JsonDict: """Bind a 3PID to an identity server @@ -173,8 +172,7 @@ class IdentityHandler: mxid: The MXID to bind the 3PID to id_server: The domain of the identity server to query id_access_token: The access token to authenticate to the identity - server with, if necessary. Required if use_v2 is true - use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True + server with Raises: SynapseError: On any of the following conditions @@ -186,24 +184,15 @@ class IdentityHandler: """ logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server) - # If an id_access_token is not supplied, force usage of v1 - if id_access_token is None: - use_v2 = False - if not valid_id_server_location(id_server): raise SynapseError( 400, "id_server must be a valid hostname with optional port and path components", ) - # Decide which API endpoint URLs to use - headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} - if use_v2: - bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) - headers["Authorization"] = create_id_access_token_header(id_access_token) # type: ignore - else: - bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) + bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) + headers = {"Authorization": create_id_access_token_header(id_access_token)} try: # Use the blacklisting http client as this call is only to identity servers @@ -222,21 +211,14 @@ class IdentityHandler: return data except HttpResponseException as e: - if e.code != 404 or not use_v2: - logger.error("3PID bind failed with Matrix error: %r", e) - raise e.to_synapse_error() + logger.error("3PID bind failed with Matrix error: %r", e) + raise e.to_synapse_error() except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except CodeMessageException as e: data = json_decoder.decode(e.msg) # XXX WAT? 
return data - logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url) - res = await self.bind_threepid( - client_secret, sid, mxid, id_server, id_access_token, use_v2=False - ) - return res - async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: """Attempt to remove a 3PID from an identity server, or if one is not provided, all identity servers we're aware the binding is present on diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 19c2da4244..0cc87a4001 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -704,10 +704,12 @@ class ThreepidBindRestServlet(RestServlet): async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: body = parse_json_object_from_request(request) - assert_params_in_dict(body, ["id_server", "sid", "client_secret"]) + assert_params_in_dict( + body, ["id_server", "sid", "id_access_token", "client_secret"] + ) id_server = body["id_server"] sid = body["sid"] - id_access_token = body.get("id_access_token") # optional + id_access_token = body["id_access_token"] client_secret = body["client_secret"] assert_valid_client_secret(client_secret) -- cgit 1.4.1 From a366b75b7241512b319d28ba95c65995e24de9ed Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 12 Jul 2022 19:52:06 +0100 Subject: Drop unused table `event_reference_hashes` (#13218) This is unused since Synapse 1.60.0 (#12679). It's time for it to go. --- changelog.d/13218.misc | 1 + .../main/delta/72/03drop_event_reference_hashes.sql | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/13218.misc create mode 100644 synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql diff --git a/changelog.d/13218.misc b/changelog.d/13218.misc new file mode 100644 index 0000000000..b1c8e5c747 --- /dev/null +++ b/changelog.d/13218.misc @@ -0,0 +1 @@ +Remove unused database table `event_reference_hashes`. diff --git a/synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql b/synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql new file mode 100644 index 0000000000..0da668aa3a --- /dev/null +++ b/synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql @@ -0,0 +1,17 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- event_reference_hashes is unused, so we can drop it +DROP TABLE event_reference_hashes; -- cgit 1.4.1 From 1381563988c6dc7a2b8801b736b1f0c663970da8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Jul 2022 15:01:58 -0400 Subject: Inline URL preview documentation. (#13261) Inline URL preview documentation near the implementation. 
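(Stepping back briefly to the `/3pid/bind` change in #13239 above: with the v1 fallback removed, there is exactly one way to build the outgoing request. A hedged sketch of the pieces, independent of Synapse's HTTP client:)

```python
from typing import Dict, Tuple


def build_bind_request(
    id_server: str,
    sid: str,
    client_secret: str,
    mxid: str,
    id_access_token: str,  # now mandatory: no v1 fallback exists
) -> Tuple[str, Dict[str, str], Dict[str, str]]:
    """Build (url, headers, body) for a v2 identity-server bind call."""
    url = f"https://{id_server}/_matrix/identity/v2/3pid/bind"
    # The access token is sent as a bearer token, mirroring what
    # create_id_access_token_header() produces in synapse/handlers/identity.py.
    headers = {"Authorization": f"Bearer {id_access_token}"}
    body = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
    return url, headers, body
```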
--- changelog.d/13233.doc | 3 +- changelog.d/13261.doc | 1 + docs/SUMMARY.md | 1 - docs/admin_api/user_admin_api.md | 2 +- docs/development/url_previews.md | 62 --------------------------- docs/media_repository.md | 5 +-- synapse/rest/media/v1/preview_url_resource.py | 62 +++++++++++++++++++++++++-- 7 files changed, 62 insertions(+), 74 deletions(-) create mode 100644 changelog.d/13261.doc delete mode 100644 docs/development/url_previews.md diff --git a/changelog.d/13233.doc b/changelog.d/13233.doc index b6babd7f15..3eaea7c5e3 100644 --- a/changelog.d/13233.doc +++ b/changelog.d/13233.doc @@ -1,2 +1 @@ -Add a link to configuration instructions in the URL preview documentation. - +Move the documentation for how URL previews work to the URL preview module. diff --git a/changelog.d/13261.doc b/changelog.d/13261.doc new file mode 100644 index 0000000000..3eaea7c5e3 --- /dev/null +++ b/changelog.d/13261.doc @@ -0,0 +1 @@ +Move the documentation for how URL previews work to the URL preview module. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8d6030e34a..f54b571d38 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -35,7 +35,6 @@ - [Application Services](application_services.md) - [Server Notices](server_notices.md) - [Consent Tracking](consent_tracking.md) - - [URL Previews](development/url_previews.md) - [User Directory](user_directory.md) - [Message Retention Policies](message_retention_policies.md) - [Pluggable Modules](modules/index.md) diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 1235f1cb95..0871cfebf5 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -544,7 +544,7 @@ Gets a list of all local media that a specific `user_id` has created. These are media that the user has uploaded themselves ([local media](../media_repository.md#local-media)), as well as [URL preview images](../media_repository.md#url-previews) requested by the user if the -[feature is enabled](../development/url_previews.md). +[feature is enabled](../usage/configuration/config_documentation.md#url_preview_enabled). By default, the response is ordered by descending creation date and ascending media ID. The newest media is on top. You can change the order with parameters diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md deleted file mode 100644 index 25f189683d..0000000000 --- a/docs/development/url_previews.md +++ /dev/null @@ -1,62 +0,0 @@ -URL Previews -============ -For information on how to enable URL previews in synapse, please see the [config manual](../usage/configuration/config_documentation.md#url_preview_enabled). - -The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API -for URLs which outputs [Open Graph](https://ogp.me/) responses (with some Matrix -specific additions). - -This does have trade-offs compared to other designs: - -* Pros: - * Simple and flexible; can be used by any clients at any point -* Cons: - * If each homeserver provides one of these independently, all the HSes in a - room may needlessly DoS the target URI - * The URL metadata must be stored somewhere, rather than just using Matrix - itself to store the media. - * Matrix cannot be used to distribute the metadata between homeservers. - -When Synapse is asked to preview a URL it does the following: - -1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the - config). -2. Checks the in-memory cache by URLs and returns the result if it exists. 
(This - is also used to de-duplicate processing of multiple in-flight requests at once.) -3. Kicks off a background process to generate a preview: - 1. Checks the database cache by URL and timestamp and returns the result if it - has not expired and was successful (a 2xx return code). - 2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it - does, update the URL to download. - 3. Downloads the URL and stores it into a file via the media storage provider - and saves the local media metadata. - 4. If the media is an image: - 1. Generates thumbnails. - 2. Generates an Open Graph response based on image properties. - 5. If the media is HTML: - 1. Decodes the HTML via the stored file. - 2. Generates an Open Graph response from the HTML. - 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: - 1. Downloads the URL and stores it into a file via the media storage provider - and saves the local media metadata. - 2. Convert the oEmbed response to an Open Graph response. - 3. Override any Open Graph data from the HTML with data from oEmbed. - 4. If an image exists in the Open Graph response: - 1. Downloads the URL and stores it into a file via the media storage - provider and saves the local media metadata. - 2. Generates thumbnails. - 3. Updates the Open Graph response based on image properties. - 6. If the media is JSON and an oEmbed URL was found: - 1. Convert the oEmbed response to an Open Graph response. - 2. If a thumbnail or image is in the oEmbed response: - 1. Downloads the URL and stores it into a file via the media storage - provider and saves the local media metadata. - 2. Generates thumbnails. - 3. Updates the Open Graph response based on image properties. - 7. Stores the result in the database cache. -4. Returns the result. - -The in-memory cache expires after 1 hour. - -Expired entries in the database cache (and their associated media files) are -deleted every 10 seconds. The default expiration time is 1 hour from download. diff --git a/docs/media_repository.md b/docs/media_repository.md index ba17f8a856..23e6da7f31 100644 --- a/docs/media_repository.md +++ b/docs/media_repository.md @@ -7,8 +7,7 @@ The media repository users. * caches avatars, attachments and their thumbnails for media uploaded by remote users. - * caches resources and thumbnails used for - [URL previews](development/url_previews.md). + * caches resources and thumbnails used for URL previews. All media in Matrix can be identified by a unique [MXC URI](https://spec.matrix.org/latest/client-server-api/#matrix-content-mxc-uris), @@ -59,8 +58,6 @@ remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg Note that `remote_thumbnail/` does not have an `s`. ## URL Previews -See [URL Previews](development/url_previews.md) for documentation on the URL preview -process. When generating previews for URLs, Synapse may download and cache various resources, including images. These resources are assigned temporary media IDs diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 54a849eac9..b36c98a08e 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -109,10 +109,64 @@ class MediaInfo: class PreviewUrlResource(DirectServeJsonResource): """ - Generating URL previews is a complicated task which many potential pitfalls. - - See docs/development/url_previews.md for discussion of the design and - algorithm followed in this module. 
+ The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API + for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix + specific additions). + + This does have trade-offs compared to other designs: + + * Pros: + * Simple and flexible; can be used by any clients at any point + * Cons: + * If each homeserver provides one of these independently, all the homeservers in a + room may needlessly DoS the target URI + * The URL metadata must be stored somewhere, rather than just using Matrix + itself to store the media. + * Matrix cannot be used to distribute the metadata between homeservers. + + When Synapse is asked to preview a URL it does the following: + + 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the + config). + 2. Checks the URL against an in-memory cache and returns the result if it exists. (This + is also used to de-duplicate processing of multiple in-flight requests at once.) + 3. Kicks off a background process to generate a preview: + 1. Checks URL and timestamp against the database cache and returns the result if it + has not expired and was successful (a 2xx return code). + 2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it + does, update the URL to download. + 3. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 4. If the media is an image: + 1. Generates thumbnails. + 2. Generates an Open Graph response based on image properties. + 5. If the media is HTML: + 1. Decodes the HTML via the stored file. + 2. Generates an Open Graph response from the HTML. + 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: + 1. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 2. Convert the oEmbed response to an Open Graph response. + 3. Override any Open Graph data from the HTML with data from oEmbed. + 4. If an image exists in the Open Graph response: + 1. Downloads the URL and stores it into a file via the media storage + provider and saves the local media metadata. + 2. Generates thumbnails. + 3. Updates the Open Graph response based on image properties. + 6. If the media is JSON and an oEmbed URL was found: + 1. Convert the oEmbed response to an Open Graph response. + 2. If a thumbnail or image is in the oEmbed response: + 1. Downloads the URL and stores it into a file via the media storage + provider and saves the local media metadata. + 2. Generates thumbnails. + 3. Updates the Open Graph response based on image properties. + 7. Stores the result in the database cache. + 4. Returns the result. + + The in-memory cache expires after 1 hour. + + Expired entries in the database cache (and their associated media files) are + deleted every 10 seconds. The default expiration time is 1 hour from download. 
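Steps 1 and 2 of the pipeline above are the cheap, synchronous part. A self-contained sketch of just those two steps (illustrative only; the real resource uses Synapse's expiring caches and Deferreds rather than asyncio tasks):

```python
import asyncio
import fnmatch
from typing import Awaitable, Callable, Dict, List


class PreviewRequestCache:
    def __init__(self, url_blacklist: List[str]) -> None:
        self._url_blacklist = url_blacklist
        self._inflight: Dict[str, "asyncio.Task"] = {}

    async def preview(
        self, url: str, generate: Callable[[str], Awaitable[dict]]
    ) -> dict:
        # Step 1: reject URLs matching the configured blacklist patterns.
        if any(fnmatch.fnmatch(url, pattern) for pattern in self._url_blacklist):
            raise ValueError("URL may not be previewed")
        # Step 2: de-duplicate concurrent requests for the same URL. A Task
        # (unlike a bare coroutine) can safely be awaited many times, so all
        # callers share one in-flight preview.
        task = self._inflight.get(url)
        if task is None:
            task = asyncio.ensure_future(generate(url))
            self._inflight[url] = task
        return await task
```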
""" isLeaf = True -- cgit 1.4.1 From 0312ff44c69ba16371206fc42b9f886b24253bcd Mon Sep 17 00:00:00 2001 From: Thomas Weston Date: Wed, 13 Jul 2022 11:33:21 +0100 Subject: Fix "add user" admin api error when request contains a "msisdn" threepid (#13263) Co-authored-by: Thomas Weston Co-authored-by: David Robertson --- changelog.d/13263.bugfix | 1 + synapse/rest/admin/users.py | 1 + tests/rest/admin/test_user.py | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+) create mode 100644 changelog.d/13263.bugfix diff --git a/changelog.d/13263.bugfix b/changelog.d/13263.bugfix new file mode 100644 index 0000000000..91e1d1e7eb --- /dev/null +++ b/changelog.d/13263.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the "enable_email_notifs" and "email_notifs_for_new_users" options were enabled. Contributed by @thomasweston12. diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index f0614a2897..ba2f7fa6d8 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -373,6 +373,7 @@ class UserRestServletV2(RestServlet): if ( self.hs.config.email.email_enable_notifs and self.hs.config.email.email_notif_for_new_users + and medium == "email" ): await self.pusher_pool.add_pusher( user_id=user_id, diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index e32aaadb98..97693cd1e2 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1636,6 +1636,41 @@ class UserRestTestCase(unittest.HomeserverTestCase): ) self.assertEqual(len(pushers), 0) + @override_config( + { + "email": { + "enable_notifs": True, + "notif_for_new_users": True, + "notif_from": "test@example.com", + }, + "public_baseurl": "https://example.com", + } + ) + def test_create_user_email_notif_for_new_users_with_msisdn_threepid(self) -> None: + """ + Check that a new regular user is created successfully when they have a msisdn + threepid and email notif_for_new_users is set to True. + """ + url = self.url_prefix % "@bob:test" + + # Create user + body = { + "password": "abc123", + "threepids": [{"medium": "msisdn", "address": "1234567890"}], + } + + channel = self.make_request( + "PUT", + url, + access_token=self.admin_user_tok, + content=body, + ) + + self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual("@bob:test", channel.json_body["name"]) + self.assertEqual("msisdn", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("1234567890", channel.json_body["threepids"][0]["address"]) + def test_set_password(self) -> None: """ Test setting a new password for another user. -- cgit 1.4.1 From 90e9b4fa1e51a271e3cf46d38efd5d93c0c99857 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 13 Jul 2022 08:30:42 -0400 Subject: Do not fail build if complement with workers fails. 
(#13266) --- .github/workflows/tests.yml | 27 ++++++++++++++++++++++++--- changelog.d/13266.misc | 1 + 2 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13266.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4bc29c8207..c8b033e8a4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -328,9 +328,6 @@ jobs: - arrangement: monolith database: Postgres - - arrangement: workers - database: Postgres - steps: - name: Run actions/checkout@v2 for synapse uses: actions/checkout@v2 @@ -346,6 +343,30 @@ jobs: shell: bash name: Run Complement Tests + # XXX When complement with workers is stable, move this back into the standard + # "complement" matrix above. + # + # See https://github.com/matrix-org/synapse/issues/13161 + complement-workers: + if: "${{ !failure() && !cancelled() }}" + needs: linting-done + runs-on: ubuntu-latest + + steps: + - name: Run actions/checkout@v2 for synapse + uses: actions/checkout@v2 + with: + path: synapse + + - name: Prepare Complement's Prerequisites + run: synapse/.ci/scripts/setup_complement_prerequisites.sh + + - run: | + set -o pipefail + POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + shell: bash + name: Run Complement Tests + # a job which marks all the other jobs as complete, thus allowing PRs to be merged. tests-done: if: ${{ always() }} diff --git a/changelog.d/13266.misc b/changelog.d/13266.misc new file mode 100644 index 0000000000..d583acb81b --- /dev/null +++ b/changelog.d/13266.misc @@ -0,0 +1 @@ +Do not fail build if complement with workers fails. -- cgit 1.4.1 From 4db7862e0fce1a7b1a84458d572ba006b032c737 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 13 Jul 2022 09:55:14 -0400 Subject: Drop unused tables from groups/communities. (#12967) These tables have been unused since Synapse v1.61.0, although schema version 72 was added in Synapse v1.62.0. --- changelog.d/12967.removal | 1 + synapse/_scripts/synapse_port_db.py | 16 ----------- synapse/storage/schema/__init__.py | 7 ++--- .../schema/main/delta/72/03remove_groups.sql | 31 ++++++++++++++++++++++ 4 files changed, 36 insertions(+), 19 deletions(-) create mode 100644 changelog.d/12967.removal create mode 100644 synapse/storage/schema/main/delta/72/03remove_groups.sql diff --git a/changelog.d/12967.removal b/changelog.d/12967.removal new file mode 100644 index 0000000000..0aafd6a4d9 --- /dev/null +++ b/changelog.d/12967.removal @@ -0,0 +1 @@ +Drop tables used for groups/communities. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 26834a437e..543bba27c2 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -166,22 +166,6 @@ IGNORED_TABLES = { "ui_auth_sessions", "ui_auth_sessions_credentials", "ui_auth_sessions_ips", - # Groups/communities is no longer supported. 
- "group_attestations_remote", - "group_attestations_renewals", - "group_invites", - "group_roles", - "group_room_categories", - "group_rooms", - "group_summary_roles", - "group_summary_room_categories", - "group_summary_rooms", - "group_summary_users", - "group_users", - "groups", - "local_group_membership", - "local_group_updates", - "remote_profile_cache", } diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index dc237e3032..a9a88c8bfd 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -74,13 +74,14 @@ Changes in SCHEMA_VERSION = 71: Changes in SCHEMA_VERSION = 72: - event_edges.(room_id, is_state) are no longer written to. + - Tables related to groups are dropped. """ SCHEMA_COMPAT_VERSION = ( - # We no longer maintain `event_edges.room_id`, so synapses with SCHEMA_VERSION < 71 - # will break. - 71 + # The groups tables are no longer accessible, so synapses with SCHEMA_VERSION < 72 + # could break. + 72 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/72/03remove_groups.sql b/synapse/storage/schema/main/delta/72/03remove_groups.sql new file mode 100644 index 0000000000..b7c5894de8 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/03remove_groups.sql @@ -0,0 +1,31 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Remove the tables which powered the unspecced groups/communities feature. +DROP TABLE IF EXISTS group_attestations_remote; +DROP TABLE IF EXISTS group_attestations_renewals; +DROP TABLE IF EXISTS group_invites; +DROP TABLE IF EXISTS group_roles; +DROP TABLE IF EXISTS group_room_categories; +DROP TABLE IF EXISTS group_rooms; +DROP TABLE IF EXISTS group_summary_roles; +DROP TABLE IF EXISTS group_summary_room_categories; +DROP TABLE IF EXISTS group_summary_rooms; +DROP TABLE IF EXISTS group_summary_users; +DROP TABLE IF EXISTS group_users; +DROP TABLE IF EXISTS groups; +DROP TABLE IF EXISTS local_group_membership; +DROP TABLE IF EXISTS local_group_updates; +DROP TABLE IF EXISTS remote_profile_cache; -- cgit 1.4.1 From 3371e1abcb607776c6351ab5d73ba38f1db8a8f2 Mon Sep 17 00:00:00 2001 From: Brad Murray Date: Wed, 13 Jul 2022 10:18:20 -0400 Subject: Add prometheus counters for content types other than events (#13175) --- changelog.d/13175.misc | 1 + synapse/appservice/api.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 changelog.d/13175.misc diff --git a/changelog.d/13175.misc b/changelog.d/13175.misc new file mode 100644 index 0000000000..f273b3d6ca --- /dev/null +++ b/changelog.d/13175.misc @@ -0,0 +1 @@ +Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. 
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index df1c214462..0963fb3bb4 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -53,6 +53,18 @@ sent_events_counter = Counter( "synapse_appservice_api_sent_events", "Number of events sent to the AS", ["service"] ) +sent_ephemeral_counter = Counter( + "synapse_appservice_api_sent_ephemeral", + "Number of ephemeral events sent to the AS", + ["service"], +) + +sent_todevice_counter = Counter( + "synapse_appservice_api_sent_todevice", + "Number of todevice messages sent to the AS", + ["service"], +) + HOUR_IN_MS = 60 * 60 * 1000 @@ -310,6 +322,8 @@ class ApplicationServiceApi(SimpleHttpClient): ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(serialized_events)) + sent_ephemeral_counter.labels(service.id).inc(len(ephemeral)) + sent_todevice_counter.labels(service.id).inc(len(to_device_messages)) return True except CodeMessageException as e: logger.warning( -- cgit 1.4.1 From 1d5c80b16188c587427d663c3bec57e9c196dd1b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 13 Jul 2022 13:23:16 -0400 Subject: Reduce duplicate code in receipts servlets. (#13198) --- changelog.d/13198.misc | 1 + synapse/rest/client/read_marker.py | 56 ++++++++++++++++---------------------- synapse/rest/client/receipts.py | 20 ++++++-------- 3 files changed, 33 insertions(+), 44 deletions(-) create mode 100644 changelog.d/13198.misc diff --git a/changelog.d/13198.misc b/changelog.d/13198.misc new file mode 100644 index 0000000000..5aef2432df --- /dev/null +++ b/changelog.d/13198.misc @@ -0,0 +1 @@ +Refactor receipts servlet logic to avoid duplicated code. diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 3644705e6a..8896f2df50 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -40,6 +40,10 @@ class ReadMarkerRestServlet(RestServlet): self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() + self._known_receipt_types = {ReceiptTypes.READ, ReceiptTypes.FULLY_READ} + if hs.config.experimental.msc2285_enabled: + self._known_receipt_types.add(ReceiptTypes.READ_PRIVATE) + async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: @@ -49,13 +53,7 @@ class ReadMarkerRestServlet(RestServlet): body = parse_json_object_from_request(request) - valid_receipt_types = { - ReceiptTypes.READ, - ReceiptTypes.FULLY_READ, - ReceiptTypes.READ_PRIVATE, - } - - unrecognized_types = set(body.keys()) - valid_receipt_types + unrecognized_types = set(body.keys()) - self._known_receipt_types if unrecognized_types: # It's fine if there are unrecognized receipt types, but let's log # it to help debug clients that have typoed the receipt type. @@ -65,31 +63,25 @@ class ReadMarkerRestServlet(RestServlet): # types. 
logger.info("Ignoring unrecognized receipt types: %s", unrecognized_types) - read_event_id = body.get(ReceiptTypes.READ, None) - if read_event_id: - await self.receipts_handler.received_client_receipt( - room_id, - ReceiptTypes.READ, - user_id=requester.user.to_string(), - event_id=read_event_id, - ) - - read_private_event_id = body.get(ReceiptTypes.READ_PRIVATE, None) - if read_private_event_id and self.config.experimental.msc2285_enabled: - await self.receipts_handler.received_client_receipt( - room_id, - ReceiptTypes.READ_PRIVATE, - user_id=requester.user.to_string(), - event_id=read_private_event_id, - ) - - read_marker_event_id = body.get(ReceiptTypes.FULLY_READ, None) - if read_marker_event_id: - await self.read_marker_handler.received_client_read_marker( - room_id, - user_id=requester.user.to_string(), - event_id=read_marker_event_id, - ) + for receipt_type in self._known_receipt_types: + event_id = body.get(receipt_type, None) + # TODO Add validation to reject non-string event IDs. + if not event_id: + continue + + if receipt_type == ReceiptTypes.FULLY_READ: + await self.read_marker_handler.received_client_read_marker( + room_id, + user_id=requester.user.to_string(), + event_id=event_id, + ) + else: + await self.receipts_handler.received_client_receipt( + room_id, + receipt_type, + user_id=requester.user.to_string(), + event_id=event_id, + ) return 200, {} diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 4b03eb876b..409bfd43c1 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -39,31 +39,27 @@ class ReceiptRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.hs = hs self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() + self._known_receipt_types = {ReceiptTypes.READ} + if hs.config.experimental.msc2285_enabled: + self._known_receipt_types.update( + (ReceiptTypes.READ_PRIVATE, ReceiptTypes.FULLY_READ) + ) + async def on_POST( self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - if self.hs.config.experimental.msc2285_enabled and receipt_type not in [ - ReceiptTypes.READ, - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.FULLY_READ, - ]: + if receipt_type not in self._known_receipt_types: raise SynapseError( 400, - "Receipt type must be 'm.read', 'org.matrix.msc2285.read.private' or 'm.fully_read'", + f"Receipt type must be {', '.join(self._known_receipt_types)}", ) - elif ( - not self.hs.config.experimental.msc2285_enabled - and receipt_type != ReceiptTypes.READ - ): - raise SynapseError(400, "Receipt type must be 'm.read'") parse_json_object_from_request(request, allow_empty_body=False) -- cgit 1.4.1 From 982fe2965515e4536a0aa0153fa6bee238179f51 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Wed, 13 Jul 2022 20:32:46 +0200 Subject: Optimise room creation event lookups part 2 (#13224) --- changelog.d/13224.misc | 1 + synapse/handlers/room.py | 45 +++++++++++++++++++++++++++++++++-------- synapse/handlers/room_member.py | 43 ++++++++++++++++++++++++++++++++------- tests/rest/client/test_rooms.py | 8 ++++---- 4 files changed, 78 insertions(+), 19 deletions(-) create mode 100644 changelog.d/13224.misc diff --git a/changelog.d/13224.misc b/changelog.d/13224.misc new file mode 100644 index 0000000000..41f8693b74 --- /dev/null +++ 
b/changelog.d/13224.misc @@ -0,0 +1 @@ +Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a54f163c0a..978d3ee39f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -889,7 +889,11 @@ class RoomCreationHandler: # override any attempt to set room versions via the creation_content creation_content["room_version"] = room_version.identifier - last_stream_id = await self._send_events_for_new_room( + ( + last_stream_id, + last_sent_event_id, + depth, + ) = await self._send_events_for_new_room( requester, room_id, preset_config=preset_config, @@ -905,7 +909,7 @@ class RoomCreationHandler: if "name" in config: name = config["name"] ( - _, + name_event, last_stream_id, ) = await self.event_creation_handler.create_and_send_nonmember_event( requester, @@ -917,12 +921,16 @@ class RoomCreationHandler: "content": {"name": name}, }, ratelimit=False, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = name_event.event_id + depth += 1 if "topic" in config: topic = config["topic"] ( - _, + topic_event, last_stream_id, ) = await self.event_creation_handler.create_and_send_nonmember_event( requester, @@ -934,7 +942,11 @@ class RoomCreationHandler: "content": {"topic": topic}, }, ratelimit=False, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = topic_event.event_id + depth += 1 # we avoid dropping the lock between invites, as otherwise joins can # start coming in and making the createRoom slow. @@ -949,7 +961,7 @@ class RoomCreationHandler: for invitee in invite_list: ( - _, + member_event_id, last_stream_id, ) = await self.room_member_handler.update_membership_locked( requester, @@ -959,7 +971,11 @@ class RoomCreationHandler: ratelimit=False, content=content, new_room=True, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = member_event_id + depth += 1 for invite_3pid in invite_3pid_list: id_server = invite_3pid["id_server"] @@ -968,7 +984,10 @@ class RoomCreationHandler: medium = invite_3pid["medium"] # Note that do_3pid_invite can raise a ShadowBanError, but this was # handled above by emptying invite_3pid_list. - last_stream_id = await self.hs.get_room_member_handler().do_3pid_invite( + ( + member_event_id, + last_stream_id, + ) = await self.hs.get_room_member_handler().do_3pid_invite( room_id, requester.user, medium, @@ -977,7 +996,11 @@ class RoomCreationHandler: requester, txn_id=None, id_access_token=id_access_token, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = member_event_id + depth += 1 result = {"room_id": room_id} @@ -1005,20 +1028,22 @@ class RoomCreationHandler: power_level_content_override: Optional[JsonDict] = None, creator_join_profile: Optional[JsonDict] = None, ratelimit: bool = True, - ) -> int: + ) -> Tuple[int, str, int]: """Sends the initial events into a new room. `power_level_content_override` doesn't apply when initial state has power level state event content. Returns: - The stream_id of the last event persisted. + A tuple containing the stream ID, event ID and depth of the last + event sent to the room. 
""" creator_id = creator.user.to_string() event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""} + depth = 1 last_sent_event_id: Optional[str] = None def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: @@ -1031,6 +1056,7 @@ class RoomCreationHandler: async def send(etype: str, content: JsonDict, **kwargs: Any) -> int: nonlocal last_sent_event_id + nonlocal depth event = create(etype, content, **kwargs) logger.debug("Sending %s in new room", etype) @@ -1047,9 +1073,11 @@ class RoomCreationHandler: # Note: we don't pass state_event_ids here because this triggers # an additional query per event to look them up from the events table. prev_event_ids=[last_sent_event_id] if last_sent_event_id else [], + depth=depth, ) last_sent_event_id = sent_event.event_id + depth += 1 return last_stream_id @@ -1075,6 +1103,7 @@ class RoomCreationHandler: content=creator_join_profile, new_room=True, prev_event_ids=[last_sent_event_id], + depth=depth, ) last_sent_event_id = member_event_id @@ -1168,7 +1197,7 @@ class RoomCreationHandler: content={"algorithm": RoomEncryptionAlgorithms.DEFAULT}, ) - return last_sent_stream_id + return last_sent_stream_id, last_sent_event_id, depth def _generate_room_id(self) -> str: """Generates a random room ID. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 04c44b2ccb..90e0b21600 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -285,6 +285,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, txn_id: Optional[str] = None, ratelimit: bool = True, content: Optional[dict] = None, @@ -315,6 +316,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): prev_events are set so we need to set them ourself via this argument. This should normally be left as None, which will cause the auth_event_ids to be calculated based on the room state at the prev_events. + depth: Override the depth used to order the event in the DAG. + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. txn_id: ratelimit: @@ -370,6 +374,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, + depth=depth, require_consent=require_consent, outlier=outlier, historical=historical, @@ -466,6 +471,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, ) -> Tuple[str, int]: """Update a user's membership in a room. @@ -501,6 +507,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): prev_events are set so we need to set them ourself via this argument. This should normally be left as None, which will cause the auth_event_ids to be calculated based on the room state at the prev_events. + depth: Override the depth used to order the event in the DAG. + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. Returns: A tuple of the new event ID and stream ID. 
@@ -540,6 +549,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, + depth=depth, ) return result @@ -562,6 +572,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, ) -> Tuple[str, int]: """Helper for update_membership. @@ -599,6 +610,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): prev_events are set so we need to set them ourself via this argument. This should normally be left as None, which will cause the auth_event_ids to be calculated based on the room state at the prev_events. + depth: Override the depth used to order the event in the DAG. + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. Returns: A tuple of the new event ID and stream ID. @@ -732,6 +746,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, + depth=depth, content=content, require_consent=require_consent, outlier=outlier, @@ -967,6 +982,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ratelimit=ratelimit, prev_event_ids=latest_event_ids, state_event_ids=state_event_ids, + depth=depth, content=content, require_consent=require_consent, outlier=outlier, @@ -1322,7 +1338,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): requester: Requester, txn_id: Optional[str], id_access_token: Optional[str] = None, - ) -> int: + prev_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, + ) -> Tuple[str, int]: """Invite a 3PID to a room. Args: @@ -1335,9 +1353,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): txn_id: The transaction ID this is part of, or None if this is not part of a transaction. id_access_token: The optional identity server access token. + depth: Override the depth used to order the event in the DAG. + prev_event_ids: The event IDs to use as the prev events + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. Returns: - The new stream ID. + Tuple of event ID and stream ordering position Raises: ShadowBanError if the requester has been shadow-banned. @@ -1383,7 +1405,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # We don't check the invite against the spamchecker(s) here (through # user_may_invite) because we'll do it further down the line anyway (in # update_membership_locked). 
- _, stream_id = await self.update_membership( + event_id, stream_id = await self.update_membership( requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id ) else: @@ -1402,7 +1424,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): additional_fields=spam_check[1], ) - stream_id = await self._make_and_store_3pid_invite( + event, stream_id = await self._make_and_store_3pid_invite( requester, id_server, medium, @@ -1411,9 +1433,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): inviter, txn_id=txn_id, id_access_token=id_access_token, + prev_event_ids=prev_event_ids, + depth=depth, ) + event_id = event.event_id - return stream_id + return event_id, stream_id async def _make_and_store_3pid_invite( self, @@ -1425,7 +1450,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): user: UserID, txn_id: Optional[str], id_access_token: Optional[str] = None, - ) -> int: + prev_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, + ) -> Tuple[EventBase, int]: room_state = await self._storage_controllers.state.get_current_state( room_id, StateFilter.from_types( @@ -1518,8 +1545,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): }, ratelimit=False, txn_id=txn_id, + prev_event_ids=prev_event_ids, + depth=depth, ) - return stream_id + return event, stream_id async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool: # Have we just created the room, and is this about to be the very diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index df7ffbe545..8ed5272b16 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -709,7 +709,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(37, channel.resource_usage.db_txn_count) + self.assertEqual(32, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -722,7 +722,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(41, channel.resource_usage.db_txn_count) + self.assertEqual(35, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id @@ -3283,7 +3283,7 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. - make_invite_mock = Mock(return_value=make_awaitable(0)) + make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), @@ -3344,7 +3344,7 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. 
- make_invite_mock = Mock(return_value=make_awaitable(0)) + make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), -- cgit 1.4.1 From 2341032cf2d031e58710d82c9ee1d2360f9b82f9 Mon Sep 17 00:00:00 2001 From: jejo86 <28619134+jejo86@users.noreply.github.com> Date: Wed, 13 Jul 2022 20:33:33 +0200 Subject: Document advising against publicly exposing the Admin API and provide a usage example (#13231) * Admin API request explanation improved Pointed out, that the Admin API is not accessible by default from any remote computer, but only from the PC `matrix-synapse` is running on. Added a full, working example, making sure to include the cURL flag `-X`, which needs to be prepended to `GET`, `POST`, `PUT` etc. and listing the full query string including protocol, IP address and port. * Admin API request explanation improved * Apply suggestions from code review Update changelog. Reword prose. Co-authored-by: David Robertson --- changelog.d/13231.doc | 1 + docs/usage/administration/admin_api/README.md | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/13231.doc diff --git a/changelog.d/13231.doc b/changelog.d/13231.doc new file mode 100644 index 0000000000..e750f9da49 --- /dev/null +++ b/changelog.d/13231.doc @@ -0,0 +1 @@ +Provide an example of using the Admin API. Contributed by @jejo86. diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index 3cbedc5dfa..c60b6da0de 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -18,6 +18,11 @@ already on your `$PATH` depending on how Synapse was installed. Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. ## Making an Admin API request +For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints) +that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a +reverse proxy. This means you should typically query the Admin API from a terminal on +the machine which runs Synapse. + Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by providing the token as either a query parameter or a request header. To add it as a request header in cURL: @@ -25,5 +30,17 @@ providing the token as either a query parameter or a request header. To add it a curl --header "Authorization: Bearer " ``` +For example, suppose we want to +[query the account](user_admin_api.md#query-user-account) of the user +`@foo:bar.com`. We need an admin access token (e.g. +`syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk`), and we need to know which port +Synapse's [`client` listener](config_documentation.md#listeners) is listening +on (e.g. `8008`). Then we can use the following command to request the account +information from the Admin API. + +```sh +curl --header "Authorization: Bearer syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk" -X GET http://127.0.0.1:8008/_synapse/admin/v2/users/@foo:bar.com +``` + For more details on access tokens in Matrix, please refer to the complete [matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens). 
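The `curl` example above can equally be issued from Python; a minimal sketch using the third-party `requests` package (an assumption, any HTTP client will do), reusing the placeholder token and user ID from the docs:

```python
import requests

# Query the account of @foo:bar.com via the Admin API, authenticating with
# the admin access token in the Authorization header.
resp = requests.get(
    "http://127.0.0.1:8008/_synapse/admin/v2/users/@foo:bar.com",
    headers={"Authorization": "Bearer syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk"},
)
resp.raise_for_status()
print(resp.json())
```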
-- cgit 1.4.1 From ad5761b65cf6a3fe203d8ffc425ed45b3a0a02fa Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 13 Jul 2022 11:36:02 -0700 Subject: Add support for room version 10 (#13220) --- changelog.d/13220.feature | 1 + synapse/api/room_versions.py | 33 +++++++++++++++++++++++++++++++++ synapse/event_auth.py | 26 ++++++++++++++++++++++++++ tests/test_event_auth.py | 41 ++++++++++++++++++++++++++++++++++++++++- 4 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13220.feature diff --git a/changelog.d/13220.feature b/changelog.d/13220.feature new file mode 100644 index 0000000000..9b0240fdc8 --- /dev/null +++ b/changelog.d/13220.feature @@ -0,0 +1 @@ +Add support for room version 10. diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 3f85d61b46..00e81b3afc 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -84,6 +84,8 @@ class RoomVersion: # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of # knocks and restricted join rules into the same join condition. msc3787_knock_restricted_join_rule: bool + # MSC3667: Enforce integer power levels + msc3667_int_only_power_levels: bool class RoomVersions: @@ -103,6 +105,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V2 = RoomVersion( "2", @@ -120,6 +123,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V3 = RoomVersion( "3", @@ -137,6 +141,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V4 = RoomVersion( "4", @@ -154,6 +159,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V5 = RoomVersion( "5", @@ -171,6 +177,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V6 = RoomVersion( "6", @@ -188,6 +195,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -205,6 +213,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V7 = RoomVersion( "7", @@ -222,6 +231,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V8 = RoomVersion( "8", @@ -239,6 +249,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V9 = RoomVersion( "9", @@ -256,6 +267,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) MSC2716v3 = RoomVersion( "org.matrix.msc2716v3", @@ -273,6 +285,7 @@ class RoomVersions: msc2716_historical=True, msc2716_redactions=True, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) MSC3787 = RoomVersion( "org.matrix.msc3787", @@ -290,6 +303,25 @@ class RoomVersions: msc2716_historical=False, 
msc2716_redactions=False, msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=False, + ) + V10 = RoomVersion( + "10", + RoomDisposition.STABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2176_redaction_rules=False, + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, ) @@ -308,6 +340,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.V9, RoomVersions.MSC2716v3, RoomVersions.MSC3787, + RoomVersions.V10, ) } diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 0fc2c4b27e..965cb265da 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -740,6 +740,32 @@ def _check_power_levels( except Exception: raise SynapseError(400, "Not a valid power level: %s" % (v,)) + # Reject events with stringy power levels if required by room version + if ( + event.type == EventTypes.PowerLevels + and room_version_obj.msc3667_int_only_power_levels + ): + for k, v in event.content.items(): + if k in { + "users_default", + "events_default", + "state_default", + "ban", + "redact", + "kick", + "invite", + }: + if not isinstance(v, int): + raise SynapseError(400, f"{v!r} must be an integer.") + if k in {"events", "notifications", "users"}: + if not isinstance(v, dict) or not all( + isinstance(v, int) for v in v.values() + ): + raise SynapseError( + 400, + f"{v!r} must be a dict wherein all the values are integers.", + ) + key = (event.type, event.state_key) current_state = auth_events.get(key) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 371cd201af..e42d7b9ba0 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -19,7 +19,7 @@ from parameterized import parameterized from synapse import event_auth from synapse.api.constants import EventContentFields -from synapse.api.errors import AuthError +from synapse.api.errors import AuthError, SynapseError from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.storage.databases.main.events_worker import EventRedactBehaviour @@ -689,6 +689,45 @@ class EventAuthTestCase(unittest.TestCase): auth_events.values(), ) + def test_room_v10_rejects_string_power_levels(self) -> None: + pl_event_content = {"users_default": "42"} + pl_event = make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + **_maybe_get_event_id_dict_for_room_version(RoomVersions.V10), + "type": "m.room.power_levels", + "sender": "@test:test.com", + "state_key": "", + "content": pl_event_content, + "signatures": {"test.com": {"ed25519:0": "some9signature"}}, + }, + room_version=RoomVersions.V10, + ) + + pl_event2_content = {"events": {"m.room.name": "42", "m.room.power_levels": 42}} + pl_event2 = make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + **_maybe_get_event_id_dict_for_room_version(RoomVersions.V10), + "type": "m.room.power_levels", + "sender": "@test:test.com", + "state_key": "", + "content": pl_event2_content, + "signatures": {"test.com": {"ed25519:0": "some9signature"}}, + }, + room_version=RoomVersions.V10, + ) + + with self.assertRaises(SynapseError): + event_auth._check_power_levels( + pl_event.room_version, pl_event, {("fake_type", "fake_key"): pl_event2} + ) + + with 
self.assertRaises(SynapseError): + event_auth._check_power_levels( + pl_event.room_version, pl_event2, {("fake_type", "fake_key"): pl_event} + ) + # helpers for making events TEST_DOMAIN = "example.com" -- cgit 1.4.1 From cc1071598ad45e682a14dc8b3c1fe553e8158593 Mon Sep 17 00:00:00 2001 From: Jacek Kuśnierz Date: Wed, 13 Jul 2022 20:43:17 +0200 Subject: Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. (#13240) * Drop support for v1 unbind Signed-off-by: Jacek Kusnierz * Add changelog Signed-off-by: Jacek Kusnierz * Update changelog.d/13240.misc --- changelog.d/13240.misc | 1 + synapse/handlers/identity.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13240.misc diff --git a/changelog.d/13240.misc b/changelog.d/13240.misc new file mode 100644 index 0000000000..0567e47d64 --- /dev/null +++ b/changelog.d/13240.misc @@ -0,0 +1 @@ +Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 164d891e90..9571d461c8 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -281,8 +281,8 @@ class IdentityHandler: "id_server must be a valid hostname with optional port and path components", ) - url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) - url_bytes = b"/_matrix/identity/api/v1/3pid/unbind" + url = "https://%s/_matrix/identity/v2/3pid/unbind" % (id_server,) + url_bytes = b"/_matrix/identity/v2/3pid/unbind" content = { "mxid": mxid, -- cgit 1.4.1 From 0eb7e697682a8033564df6d602a7098d9a93d03e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 13 Jul 2022 19:48:24 +0100 Subject: Notifier: accept callbacks to fire on room joins (#13254) --- changelog.d/13254.misc | 1 + synapse/notifier.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 changelog.d/13254.misc diff --git a/changelog.d/13254.misc b/changelog.d/13254.misc new file mode 100644 index 0000000000..cba6b9ee0f --- /dev/null +++ b/changelog.d/13254.misc @@ -0,0 +1 @@ +Preparatory work for a per-room rate limiter on joins. diff --git a/synapse/notifier.py b/synapse/notifier.py index 54b0ec4b97..c42bb8266a 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -228,6 +228,7 @@ class Notifier: # Called when there are new things to stream over replication self.replication_callbacks: List[Callable[[], None]] = [] + self._new_join_in_room_callbacks: List[Callable[[str, str], None]] = [] self._federation_client = hs.get_federation_http_client() @@ -280,6 +281,19 @@ class Notifier: """ self.replication_callbacks.append(cb) + def add_new_join_in_room_callback(self, cb: Callable[[str, str], None]) -> None: + """Add a callback that will be called when a user joins a room. + + This only fires on genuine membership changes, e.g. "invite" -> "join". + Membership transitions like "join" -> "join" (for e.g. displayname changes) do + not trigger the callback. + + When called, the callback receives two arguments: the event ID and the room ID. + It should *not* return a Deferred - if it needs to do any asynchronous work, a + background thread should be started and wrapped with run_as_background_process. 
+ """ + self._new_join_in_room_callbacks.append(cb) + async def on_new_room_event( self, event: EventBase, @@ -723,6 +737,10 @@ class Notifier: for cb in self.replication_callbacks: cb() + def notify_user_joined_room(self, event_id: str, room_id: str) -> None: + for cb in self._new_join_in_room_callbacks: + cb(event_id, room_id) + def notify_remote_server_up(self, server: str) -> None: """Notify any replication that a remote server has come back up""" # We call federation_sender directly rather than registering as a -- cgit 1.4.1 From 599c403d996ed5d66cacd63abfd2e7a87279b927 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 13 Jul 2022 20:09:42 +0100 Subject: Allow rate limiters to passively record actions they cannot limit (#13253) Co-authored-by: Patrick Cloke --- changelog.d/13253.misc | 1 + synapse/api/ratelimiting.py | 94 ++++++++++++++++++++++++++++++++++++------ tests/api/test_ratelimiting.py | 74 +++++++++++++++++++++++++++++++++ 3 files changed, 157 insertions(+), 12 deletions(-) create mode 100644 changelog.d/13253.misc diff --git a/changelog.d/13253.misc b/changelog.d/13253.misc new file mode 100644 index 0000000000..cba6b9ee0f --- /dev/null +++ b/changelog.d/13253.misc @@ -0,0 +1 @@ +Preparatory work for a per-room rate limiter on joins. diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 54d13026c9..f43965c1c8 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -27,6 +27,33 @@ class Ratelimiter: """ Ratelimit actions marked by arbitrary keys. + (Note that the source code speaks of "actions" and "burst_count" rather than + "tokens" and a "bucket_size".) + + This is a "leaky bucket as a meter". For each key to be tracked there is a bucket + containing some number 0 <= T <= `burst_count` of tokens corresponding to previously + permitted requests for that key. Each bucket starts empty, and gradually leaks + tokens at a rate of `rate_hz`. + + Upon an incoming request, we must determine: + - the key that this request falls under (which bucket to inspect), and + - the cost C of this request in tokens. + Then, if there is room in the bucket for C tokens (T + C <= `burst_count`), + the request is permitted and `cost` tokens are added to the bucket. + Otherwise the request is denied, and the bucket continues to hold T tokens. + + This means that the limiter enforces an average request frequency of `rate_hz`, + while accumulating a buffer of up to `burst_count` requests which can be consumed + instantaneously. + + The tricky bit is the leaking. We do not want to have a periodic process which + leaks every bucket! Instead, we track + - the time point when the bucket was last completely empty, and + - how many tokens have added to the bucket permitted since then. + Then for each incoming request, we can calculate how many tokens have leaked + since this time point, and use that to decide if we should accept or reject the + request. + Args: clock: A homeserver clock, for retrieving the current time rate_hz: The long term number of actions that can be performed in a second. @@ -41,14 +68,30 @@ class Ratelimiter: self.burst_count = burst_count self.store = store - # A ordered dictionary keeping track of actions, when they were last - # performed and how often. Each entry is a mapping from a key of arbitrary type - # to a tuple representing: - # * How many times an action has occurred since a point in time - # * The point in time - # * The rate_hz of this particular entry. 
This can vary per request + # An ordered dictionary representing the token buckets tracked by this rate + # limiter. Each entry maps a key of arbitrary type to a tuple representing: + # * The number of tokens currently in the bucket, + # * The time point when the bucket was last completely empty, and + # * The rate_hz (leak rate) of this particular bucket. self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict() + def _get_key( + self, requester: Optional[Requester], key: Optional[Hashable] + ) -> Hashable: + """Use the requester's MXID as a fallback key if no key is provided.""" + if key is None: + if not requester: + raise ValueError("Must supply at least one of `requester` or `key`") + + key = requester.user.to_string() + return key + + def _get_action_counts( + self, key: Hashable, time_now_s: float + ) -> Tuple[float, float, float]: + """Retrieve the action counts, with a fallback representing an empty bucket.""" + return self.actions.get(key, (0.0, time_now_s, 0.0)) + async def can_do_action( self, requester: Optional[Requester], @@ -88,11 +131,7 @@ class Ratelimiter: * The reactor timestamp for when the action can be performed next. -1 if rate_hz is less than or equal to zero """ - if key is None: - if not requester: - raise ValueError("Must supply at least one of `requester` or `key`") - - key = requester.user.to_string() + key = self._get_key(requester, key) if requester: # Disable rate limiting of users belonging to any AS that is configured @@ -121,7 +160,7 @@ class Ratelimiter: self._prune_message_counts(time_now_s) # Check if there is an existing count entry for this key - action_count, time_start, _ = self.actions.get(key, (0.0, time_now_s, 0.0)) + action_count, time_start, _ = self._get_action_counts(key, time_now_s) # Check whether performing another action is allowed time_delta = time_now_s - time_start @@ -164,6 +203,37 @@ class Ratelimiter: return allowed, time_allowed + def record_action( + self, + requester: Optional[Requester], + key: Optional[Hashable] = None, + n_actions: int = 1, + _time_now_s: Optional[float] = None, + ) -> None: + """Record that an action(s) took place, even if they violate the rate limit. + + This is useful for tracking the frequency of events that happen across + federation which we still want to impose local rate limits on. For instance, if + we are alice.com monitoring a particular room, we cannot prevent bob.com + from joining users to that room. However, we can track the number of recent + joins in the room and refuse to serve new joins ourselves if there have been too + many in the room across both homeservers. + + Args: + requester: The requester that is doing the action, if any. + key: An arbitrary key used to classify an action. Defaults to the + requester's user ID. + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. + _time_now_s: The current time. Optional, defaults to the current time according + to self.clock. Only used by tests. 
+ """ + key = self._get_key(requester, key) + time_now_s = _time_now_s if _time_now_s is not None else self.clock.time() + action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s) + self.actions[key] = (action_count + n_actions, time_start, rate_hz) + def _prune_message_counts(self, time_now_s: float) -> None: """Remove message count entries that have not exceeded their defined rate_hz limit diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 18649c2c05..c86f783c5b 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -314,3 +314,77 @@ class TestRatelimiter(unittest.HomeserverTestCase): # Check that we get rate limited after using that token. self.assertFalse(consume_at(11.1)) + + def test_record_action_which_doesnt_fill_bucket(self) -> None: + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + # Observe two actions, leaving room in the bucket for one more. + limiter.record_action(requester=None, key="a", n_actions=2, _time_now_s=0.0) + + # We should be able to take a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertTrue(success) + + # ... but not two. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertFalse(success) + + def test_record_action_which_fills_bucket(self) -> None: + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + # Observe three actions, filling up the bucket. + limiter.record_action(requester=None, key="a", n_actions=3, _time_now_s=0.0) + + # We should be unable to take a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertFalse(success) + + # If we wait 10 seconds to leak a token, we should be able to take one action... + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=10.0) + ) + self.assertTrue(success) + + # ... but not two. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=10.0) + ) + self.assertFalse(success) + + def test_record_action_which_overfills_bucket(self) -> None: + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + # Observe four actions, exceeding the bucket. + limiter.record_action(requester=None, key="a", n_actions=4, _time_now_s=0.0) + + # We should be prevented from taking a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertFalse(success) + + # If we wait 10 seconds to leak a token, we should be unable to take an action + # because the bucket is still full. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=10.0) + ) + self.assertFalse(success) + + # But after another 10 seconds we leak a second token, giving us room for + # action. 
+ success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=20.0) + ) + self.assertTrue(success) -- cgit 1.4.1 From 0ca4172b5df6ea5f3e30ca6b83e51d230213971c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2022 14:57:02 +0100 Subject: Don't pull out state in `compute_event_context` for unconflicted state (#13267) --- changelog.d/13267.misc | 1 + synapse/handlers/message.py | 7 +- synapse/state/__init__.py | 117 +++++++++++++++----------- synapse/storage/controllers/__init__.py | 4 +- synapse/storage/controllers/persist_events.py | 12 ++- synapse/storage/databases/main/roommember.py | 35 ++------ tests/storage/test_roommember.py | 55 ------------ 7 files changed, 95 insertions(+), 136 deletions(-) create mode 100644 changelog.d/13267.misc diff --git a/changelog.d/13267.misc b/changelog.d/13267.misc new file mode 100644 index 0000000000..a334414320 --- /dev/null +++ b/changelog.d/13267.misc @@ -0,0 +1 @@ +Don't pull out state in `compute_event_context` for unconflicted state. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1980e37dae..b5fede9496 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1444,7 +1444,12 @@ class EventCreationHandler: if state_entry.state_group in self._external_cache_joined_hosts_updates: return - joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry) + state = await state_entry.get_state( + self._storage_controllers.state, StateFilter.all() + ) + joined_hosts = await self.store.get_joined_hosts( + event.room_id, state, state_entry + ) # Note that the expiry times must be larger than the expiry time in # _external_cache_joined_hosts_updates. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 781d9f06da..9f0a36652c 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -31,7 +31,6 @@ from typing import ( Sequence, Set, Tuple, - Union, ) import attr @@ -47,6 +46,7 @@ from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServ from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo +from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -54,6 +54,7 @@ from synapse.util.metrics import Measure, measure_func if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.controllers import StateStorageController from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -83,17 +84,20 @@ def _gen_state_id() -> str: class _StateCacheEntry: - __slots__ = ["state", "state_group", "state_id", "prev_group", "delta_ids"] + __slots__ = ["state", "state_group", "prev_group", "delta_ids"] def __init__( self, - state: StateMap[str], + state: Optional[StateMap[str]], state_group: Optional[int], prev_group: Optional[int] = None, delta_ids: Optional[StateMap[str]] = None, ): + if state is None and state_group is None: + raise Exception("Either state or state group must be not None") + # A map from (type, state_key) to event_id. - self.state = frozendict(state) + self.state = frozendict(state) if state is not None else None # the ID of a state group if one and only one is involved. # otherwise, None otherwise? 
@@ -102,20 +106,30 @@ class _StateCacheEntry: self.prev_group = prev_group self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None - # The `state_id` is a unique ID we generate that can be used as ID for - # this collection of state. Usually this would be the same as the - # state group, but on worker instances we can't generate a new state - # group each time we resolve state, so we generate a separate one that - # isn't persisted and is used solely for caches. - # `state_id` is either a state_group (and so an int) or a string. This - # ensures we don't accidentally persist a state_id as a stateg_group - if state_group: - self.state_id: Union[str, int] = state_group - else: - self.state_id = _gen_state_id() + async def get_state( + self, + state_storage: "StateStorageController", + state_filter: Optional["StateFilter"] = None, + ) -> StateMap[str]: + """Get the state map for this entry, either from the in-memory state or + looking up the state group in the DB. + """ + + if self.state is not None: + return self.state + + assert self.state_group is not None + + return await state_storage.get_state_ids_for_group( + self.state_group, state_filter + ) def __len__(self) -> int: - return len(self.state) + # The len should is used to estimate how large this cache entry is, for + # cache eviction purposes. This is why if `self.state` is None it's fine + # to return 1. + + return len(self.state) if self.state else 1 class StateHandler: @@ -153,7 +167,7 @@ class StateHandler: """ logger.debug("calling resolve_state_groups from get_current_state_ids") ret = await self.resolve_state_groups_for_events(room_id, latest_event_ids) - return ret.state + return await ret.get_state(self._state_storage_controller, StateFilter.all()) async def get_current_users_in_room( self, room_id: str, latest_event_ids: List[str] @@ -177,7 +191,8 @@ class StateHandler: logger.debug("calling resolve_state_groups from get_current_users_in_room") entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids) - return await self.store.get_joined_users_from_state(room_id, entry) + state = await entry.get_state(self._state_storage_controller, StateFilter.all()) + return await self.store.get_joined_users_from_state(room_id, state, entry) async def get_hosts_in_room_at_events( self, room_id: str, event_ids: Collection[str] @@ -192,7 +207,8 @@ class StateHandler: The hosts in the room at the given events """ entry = await self.resolve_state_groups_for_events(room_id, event_ids) - return await self.store.get_joined_hosts(room_id, entry) + state = await entry.get_state(self._state_storage_controller, StateFilter.all()) + return await self.store.get_joined_hosts(room_id, state, entry) async def compute_event_context( self, @@ -227,10 +243,19 @@ class StateHandler: # if state_ids_before_event: # if we're given the state before the event, then we use that - state_group_before_event = None state_group_before_event_prev_group = None deltas_to_state_group_before_event = None - entry = None + + # .. though we need to get a state group for it. + state_group_before_event = ( + await self._state_storage_controller.store_state_group( + event.event_id, + event.room_id, + prev_group=None, + delta_ids=None, + current_state_ids=state_ids_before_event, + ) + ) else: # otherwise, we'll need to resolve the state across the prev_events. 
@@ -264,36 +289,27 @@ class StateHandler: await_full_state=False, ) - state_ids_before_event = entry.state - state_group_before_event = entry.state_group state_group_before_event_prev_group = entry.prev_group deltas_to_state_group_before_event = entry.delta_ids - # - # make sure that we have a state group at that point. If it's not a state event, - # that will be the state group for the new event. If it *is* a state event, - # it might get rejected (in which case we'll need to persist it with the - # previous state group) - # - - if not state_group_before_event: - state_group_before_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=state_group_before_event_prev_group, - delta_ids=deltas_to_state_group_before_event, - current_state_ids=state_ids_before_event, + # We make sure that we have a state group assigned to the state. + if entry.state_group is None: + state_ids_before_event = await entry.get_state( + self._state_storage_controller, StateFilter.all() + ) + state_group_before_event = ( + await self._state_storage_controller.store_state_group( + event.event_id, + event.room_id, + prev_group=state_group_before_event_prev_group, + delta_ids=deltas_to_state_group_before_event, + current_state_ids=state_ids_before_event, + ) ) - ) - - # Assign the new state group to the cached state entry. - # - # Note that this can race in that we could generate multiple state - # groups for the same state entry, but that is just inefficient - # rather than dangerous. - if entry and entry.state_group is None: entry.state_group = state_group_before_event + else: + state_group_before_event = entry.state_group + state_ids_before_event = None # # now if it's not a state event, we're done @@ -313,6 +329,10 @@ class StateHandler: # # otherwise, we'll need to create a new state group for after the event # + if state_ids_before_event is None: + state_ids_before_event = await entry.get_state( + self._state_storage_controller, StateFilter.all() + ) key = (event.type, event.state_key) if key in state_ids_before_event: @@ -372,9 +392,6 @@ class StateHandler: state_group_ids_set = set(state_group_ids) if len(state_group_ids_set) == 1: (state_group_id,) = state_group_ids_set - state = await self._state_storage_controller.get_state_for_groups( - state_group_ids_set - ) ( prev_group, delta_ids, @@ -382,7 +399,7 @@ class StateHandler: state_group_id ) return _StateCacheEntry( - state=state[state_group_id], + state=None, state_group=state_group_id, prev_group=prev_group, delta_ids=delta_ids, diff --git a/synapse/storage/controllers/__init__.py b/synapse/storage/controllers/__init__.py index 55649719f6..45101cda7a 100644 --- a/synapse/storage/controllers/__init__.py +++ b/synapse/storage/controllers/__init__.py @@ -43,4 +43,6 @@ class StorageControllers: self.persistence = None if stores.persist_events: - self.persistence = EventsPersistenceStorageController(hs, stores) + self.persistence = EventsPersistenceStorageController( + hs, stores, self.state + ) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index ea499ce0f8..af65e5913b 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -48,9 +48,11 @@ from synapse.events.snapshot import EventContext from synapse.logging import opentracing from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics.background_process_metrics import 
run_as_background_process +from synapse.storage.controllers.state import StateStorageController from synapse.storage.databases import Databases from synapse.storage.databases.main.events import DeltaState from synapse.storage.databases.main.events_worker import EventRedactBehaviour +from synapse.storage.state import StateFilter from synapse.types import ( PersistedEventPosition, RoomStreamToken, @@ -308,7 +310,12 @@ class EventsPersistenceStorageController: current state and forward extremity changes. """ - def __init__(self, hs: "HomeServer", stores: Databases): + def __init__( + self, + hs: "HomeServer", + stores: Databases, + state_controller: StateStorageController, + ): # We ultimately want to split out the state store from the main store, # so we use separate variables here even though they point to the same # store for now. @@ -325,6 +332,7 @@ class EventsPersistenceStorageController: self._process_event_persist_queue_task ) self._state_resolution_handler = hs.get_state_resolution_handler() + self._state_controller = state_controller async def _process_event_persist_queue_task( self, @@ -504,7 +512,7 @@ class EventsPersistenceStorageController: state_res_store=StateResolutionStore(self.main_store), ) - return res.state + return await res.get_state(self._state_controller, StateFilter.all()) async def _persist_event_batch( self, _room_id: str, task: _PersistEventsTask diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 0b5e4e4254..71a65d565a 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -31,7 +31,6 @@ import attr from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase -from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import ( run_as_background_process, @@ -780,26 +779,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): return shared_room_ids or frozenset() - async def get_joined_users_from_context( - self, event: EventBase, context: EventContext - ) -> Dict[str, ProfileInfo]: - state_group: Union[object, int] = context.state_group - if not state_group: - # If state_group is None it means it has yet to be assigned a - # state group, i.e. we need to make sure that calls with a state_group - # of None don't hit previous cached calls with a None state_group. 
- # To do this we set the state_group to a new object as object() != object() - state_group = object() - - current_state_ids = await context.get_current_state_ids() - assert current_state_ids is not None - assert state_group is not None - return await self._get_joined_users_from_context( - event.room_id, state_group, current_state_ids, event=event, context=context - ) - async def get_joined_users_from_state( - self, room_id: str, state_entry: "_StateCacheEntry" + self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" ) -> Dict[str, ProfileInfo]: state_group: Union[object, int] = state_entry.state_group if not state_group: @@ -812,18 +793,17 @@ class RoomMemberWorkerStore(EventsWorkerStore): assert state_group is not None with Measure(self._clock, "get_joined_users_from_state"): return await self._get_joined_users_from_context( - room_id, state_group, state_entry.state, context=state_entry + room_id, state_group, state, context=state_entry ) - @cached(num_args=2, cache_context=True, iterable=True, max_entries=100000) + @cached(num_args=2, iterable=True, max_entries=100000) async def _get_joined_users_from_context( self, room_id: str, state_group: Union[object, int], current_state_ids: StateMap[str], - cache_context: _CacheContext, event: Optional[EventBase] = None, - context: Optional[Union[EventContext, "_StateCacheEntry"]] = None, + context: Optional["_StateCacheEntry"] = None, ) -> Dict[str, ProfileInfo]: # We don't use `state_group`, it's there so that we can cache based # on it. However, it's important that it's never None, since two current_states @@ -1017,7 +997,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): ) async def get_joined_hosts( - self, room_id: str, state_entry: "_StateCacheEntry" + self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" ) -> FrozenSet[str]: state_group: Union[object, int] = state_entry.state_group if not state_group: @@ -1030,7 +1010,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): assert state_group is not None with Measure(self._clock, "get_joined_hosts"): return await self._get_joined_hosts( - room_id, state_group, state_entry=state_entry + room_id, state_group, state, state_entry=state_entry ) @cached(num_args=2, max_entries=10000, iterable=True) @@ -1038,6 +1018,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): self, room_id: str, state_group: Union[object, int], + state: StateMap[str], state_entry: "_StateCacheEntry", ) -> FrozenSet[str]: # We don't use `state_group`, it's there so that we can cache based on @@ -1093,7 +1074,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # The cache doesn't match the state group or prev state group, # so we calculate the result from first principles. joined_users = await self.get_joined_users_from_state( - room_id, state_entry + room_id, state, state_entry ) cache.hosts_to_joined_users = {} diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 1218786d79..240b02cb9f 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -23,7 +23,6 @@ from synapse.util import Clock from tests import unittest from tests.server import TestHomeServer -from tests.test_utils import event_injection class RoomMemberStoreTestCase(unittest.HomeserverTestCase): @@ -110,60 +109,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): # It now knows about Charlie's server. 
self.assertEqual(self.store._known_servers_count, 2) - def test_get_joined_users_from_context(self) -> None: - room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) - bob_event = self.get_success( - event_injection.inject_member_event( - self.hs, room, self.u_bob, Membership.JOIN - ) - ) - - # first, create a regular event - event, context = self.get_success( - event_injection.create_event( - self.hs, - room_id=room, - sender=self.u_alice, - prev_event_ids=[bob_event.event_id], - type="m.test.1", - content={}, - ) - ) - - users = self.get_success( - self.store.get_joined_users_from_context(event, context) - ) - self.assertEqual(users.keys(), {self.u_alice, self.u_bob}) - - # Regression test for #7376: create a state event whose key matches bob's - # user_id, but which is *not* a membership event, and persist that; then check - # that `get_joined_users_from_context` returns the correct users for the next event. - non_member_event = self.get_success( - event_injection.inject_event( - self.hs, - room_id=room, - sender=self.u_bob, - prev_event_ids=[bob_event.event_id], - type="m.test.2", - state_key=self.u_bob, - content={}, - ) - ) - event, context = self.get_success( - event_injection.create_event( - self.hs, - room_id=room, - sender=self.u_alice, - prev_event_ids=[non_member_event.event_id], - type="m.test.3", - content={}, - ) - ) - users = self.get_success( - self.store.get_joined_users_from_context(event, context) - ) - self.assertEqual(users.keys(), {self.u_alice, self.u_bob}) - def test__null_byte_in_display_name_properly_handled(self) -> None: room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) -- cgit 1.4.1 From df55b377bef7b23fa7245cb2942e47b6266f50af Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 14 Jul 2022 15:07:52 +0100 Subject: CHANGES.md: fix link to upgrade notes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index bcf9fae4a5..78345c75fc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse vNext ============= -As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.63/docs/upgrade.md#upgrading-to-v1630). +As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://matrix-org.github.io/synapse/v1.64/upgrade.html#upgrading-to-v1640). Synapse 1.63.0rc1 (2022-07-12) ============================== -- cgit 1.4.1 From fe15a865a5a5bc8e89d770e43dae702aa2a809cb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 14 Jul 2022 22:52:26 +0100 Subject: Rip out auth-event reconciliation code (#12943) There is a corner in `_check_event_auth` (long known as "the weird corner") where, if we get an event with auth_events which don't match those we were expecting, we attempt to resolve the difference between our state and the remote's with a state resolution. This isn't specced, and there's general agreement we shouldn't be doing it. However, it turns out that the faster-joins code was relying on it, so we need to introduce something similar (but rather simpler) for that.
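The replacement check in the diff below compares the event's claimed auth events against the locally calculated ones as unordered multisets. A toy illustration of why `collections.Counter` is used rather than `set` (the event IDs are invented):

```python
import collections

claimed = ["$create", "$power_levels", "$member", "$member"]
calculated = ["$member", "$create", "$member", "$power_levels"]

# Counter equality ignores ordering but preserves multiplicity...
assert collections.Counter(claimed) == collections.Counter(calculated)

# ...whereas sets would wrongly treat a duplicated auth event as
# equivalent to a single occurrence.
assert {"$member", "$member"} == {"$member"}
```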
--- changelog.d/12943.misc | 1 + synapse/handlers/federation_event.py | 277 ++++++++-------------------- synapse/state/__init__.py | 26 --- tests/handlers/test_federation.py | 140 +------------- tests/rest/client/test_third_party_rules.py | 11 +- tests/test_federation.py | 8 +- 6 files changed, 88 insertions(+), 375 deletions(-) create mode 100644 changelog.d/12943.misc diff --git a/changelog.d/12943.misc b/changelog.d/12943.misc new file mode 100644 index 0000000000..f66bb3ec32 --- /dev/null +++ b/changelog.d/12943.misc @@ -0,0 +1 @@ +Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index c74117c19a..b1dab57447 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import itertools import logging from http import HTTPStatus @@ -347,7 +348,7 @@ class FederationEventHandler: event.internal_metadata.send_on_behalf_of = origin context = await self._state_handler.compute_event_context(event) - context = await self._check_event_auth(origin, event, context) + await self._check_event_auth(origin, event, context) if context.rejected: raise SynapseError( 403, f"{event.membership} event was rejected", Codes.FORBIDDEN @@ -485,7 +486,7 @@ class FederationEventHandler: partial_state=partial_state, ) - context = await self._check_event_auth(origin, event, context) + await self._check_event_auth(origin, event, context) if context.rejected: raise SynapseError(400, "Join event was rejected") @@ -1116,11 +1117,7 @@ class FederationEventHandler: state_ids_before_event=state_ids, ) try: - context = await self._check_event_auth( - origin, - event, - context, - ) + await self._check_event_auth(origin, event, context) except AuthError as e: # This happens only if we couldn't find the auth events. We'll already have # logged a warning, so now we just convert to a FederationError. @@ -1495,11 +1492,8 @@ class FederationEventHandler: ) async def _check_event_auth( - self, - origin: str, - event: EventBase, - context: EventContext, - ) -> EventContext: + self, origin: str, event: EventBase, context: EventContext + ) -> None: """ Checks whether an event should be rejected (for failing auth checks). @@ -1509,9 +1503,6 @@ class FederationEventHandler: context: The event context. - Returns: - The updated context object. - Raises: AuthError if we were unable to find copies of the event's auth events. (Most other failures just cause us to set `context.rejected`.) @@ -1526,7 +1517,7 @@ class FederationEventHandler: logger.warning("While validating received event %r: %s", event, e) # TODO: use a different rejected reason here? context.rejected = RejectedReason.AUTH_ERROR - return context + return # next, check that we have all of the event's auth events. # @@ -1538,6 +1529,9 @@ class FederationEventHandler: ) # ... and check that the event passes auth at those auth events. + # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu: + # 4. Passes authorization rules based on the event’s auth events, + # otherwise it is rejected. 
try: await check_state_independent_auth_rules(self._store, event) check_state_dependent_auth_rules(event, claimed_auth_events) @@ -1546,55 +1540,90 @@ class FederationEventHandler: "While checking auth of %r against auth_events: %s", event, e ) context.rejected = RejectedReason.AUTH_ERROR - return context + return + + # now check the auth rules pass against the room state before the event + # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu: + # 5. Passes authorization rules based on the state before the event, + # otherwise it is rejected. + # + # ... however, if we only have partial state for the room, then there is a good + # chance that we'll be missing some of the state needed to auth the new event. + # So, we state-resolve the auth events that we are given against the state that + # we know about, which ensures things like bans are applied. (Note that we'll + # already have checked we have all the auth events, in + # _load_or_fetch_auth_events_for_event above) + if context.partial_state: + room_version = await self._store.get_room_version_id(event.room_id) + + local_state_id_map = await context.get_prev_state_ids() + claimed_auth_events_id_map = { + (ev.type, ev.state_key): ev.event_id for ev in claimed_auth_events + } + + state_for_auth_id_map = ( + await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version, + [local_state_id_map, claimed_auth_events_id_map], + event_map=None, + state_res_store=StateResolutionStore(self._store), + ) + ) + else: + event_types = event_auth.auth_types_for_event(event.room_version, event) + state_for_auth_id_map = await context.get_prev_state_ids( + StateFilter.from_types(event_types) + ) - # now check auth against what we think the auth events *should* be. - event_types = event_auth.auth_types_for_event(event.room_version, event) - prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types(event_types) + calculated_auth_event_ids = self._event_auth_handler.compute_auth_events( + event, state_for_auth_id_map, for_verification=True ) - auth_events_ids = self._event_auth_handler.compute_auth_events( - event, prev_state_ids, for_verification=True + # if those are the same, we're done here. + if collections.Counter(event.auth_event_ids()) == collections.Counter( + calculated_auth_event_ids + ): + return + + # otherwise, re-run the auth checks based on what we calculated. + calculated_auth_events = await self._store.get_events_as_list( + calculated_auth_event_ids ) - auth_events_x = await self._store.get_events(auth_events_ids) + + # log the differences + + claimed_auth_event_map = {(e.type, e.state_key): e for e in claimed_auth_events} calculated_auth_event_map = { - (e.type, e.state_key): e for e in auth_events_x.values() + (e.type, e.state_key): e for e in calculated_auth_events } + logger.info( + "event's auth_events are different to our calculated auth_events. " + "Claimed but not calculated: %s. 
Calculated but not claimed: %s", + [ + ev + for k, ev in claimed_auth_event_map.items() + if k not in calculated_auth_event_map + or calculated_auth_event_map[k].event_id != ev.event_id + ], + [ + ev + for k, ev in calculated_auth_event_map.items() + if k not in claimed_auth_event_map + or claimed_auth_event_map[k].event_id != ev.event_id + ], + ) try: - updated_auth_events = await self._update_auth_events_for_auth( + check_state_dependent_auth_rules(event, calculated_auth_events) + except AuthError as e: + logger.warning( + "While checking auth of %r against room state before the event: %s", event, - calculated_auth_event_map=calculated_auth_event_map, - ) - except Exception: - # We don't really mind if the above fails, so lets not fail - # processing if it does. However, it really shouldn't fail so - # let's still log as an exception since we'll still want to fix - # any bugs. - logger.exception( - "Failed to double check auth events for %s with remote. " - "Ignoring failure and continuing processing of event.", - event.event_id, - ) - updated_auth_events = None - - if updated_auth_events: - context = await self._update_context_for_auth_events( - event, context, updated_auth_events + e, ) - auth_events_for_auth = updated_auth_events - else: - auth_events_for_auth = calculated_auth_event_map - - try: - check_state_dependent_auth_rules(event, auth_events_for_auth.values()) - except AuthError as e: - logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR - return context - async def _maybe_kick_guest_users(self, event: EventBase) -> None: if event.type != EventTypes.GuestAccess: return @@ -1704,93 +1733,6 @@ class FederationEventHandler: soft_failed_event_counter.inc() event.internal_metadata.soft_failed = True - async def _update_auth_events_for_auth( - self, - event: EventBase, - calculated_auth_event_map: StateMap[EventBase], - ) -> Optional[StateMap[EventBase]]: - """Helper for _check_event_auth. See there for docs. - - Checks whether a given event has the expected auth events. If it - doesn't then we talk to the remote server to compare state to see if - we can come to a consensus (e.g. if one server missed some valid - state). - - This attempts to resolve any potential divergence of state between - servers, but is not essential and so failures should not block further - processing of the event. - - Args: - event: - - calculated_auth_event_map: - Our calculated auth_events based on the state of the room - at the event's position in the DAG. - - Returns: - updated auth event map, or None if no changes are needed. - - """ - assert not event.internal_metadata.outlier - - # check for events which are in the event's claimed auth_events, but not - # in our calculated event map. - event_auth_events = set(event.auth_event_ids()) - different_auth = event_auth_events.difference( - e.event_id for e in calculated_auth_event_map.values() - ) - - if not different_auth: - return None - - logger.info( - "auth_events refers to events which are not in our calculated auth " - "chain: %s", - different_auth, - ) - - # XXX: currently this checks for redactions but I'm not convinced that is - # necessary? - different_events = await self._store.get_events_as_list(different_auth) - - # double-check they're all in the same room - we should already have checked - # this but it doesn't hurt to check again. 
- for d in different_events: - assert ( - d.room_id == event.room_id - ), f"Event {event.event_id} refers to auth_event {d.event_id} which is in a different room" - - # now we state-resolve between our own idea of the auth events, and the remote's - # idea of them. - - local_state = calculated_auth_event_map.values() - remote_auth_events = dict(calculated_auth_event_map) - remote_auth_events.update({(d.type, d.state_key): d for d in different_events}) - remote_state = remote_auth_events.values() - - room_version = await self._store.get_room_version_id(event.room_id) - new_state = await self._state_handler.resolve_events( - room_version, (local_state, remote_state), event - ) - different_state = { - (d.type, d.state_key): d - for d in new_state.values() - if calculated_auth_event_map.get((d.type, d.state_key)) != d - } - if not different_state: - logger.info("State res returned no new state") - return None - - logger.info( - "After state res: updating auth_events with new state %s", - different_state.values(), - ) - - # take a copy of calculated_auth_event_map before we modify it. - auth_events = dict(calculated_auth_event_map) - auth_events.update(different_state) - return auth_events - async def _load_or_fetch_auth_events_for_event( self, destination: str, event: EventBase ) -> Collection[EventBase]: @@ -1888,61 +1830,6 @@ class FederationEventHandler: await self._auth_and_persist_outliers(room_id, remote_auth_events) - async def _update_context_for_auth_events( - self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase] - ) -> EventContext: - """Update the state_ids in an event context after auth event resolution, - storing the changes as a new state group. - - Args: - event: The event we're handling the context for - - context: initial event context - - auth_events: Events to update in the event context. - - Returns: - new event context - """ - # exclude the state key of the new event from the current_state in the context. - if event.is_state(): - event_key: Optional[Tuple[str, str]] = (event.type, event.state_key) - else: - event_key = None - state_updates = { - k: a.event_id for k, a in auth_events.items() if k != event_key - } - - current_state_ids = await context.get_current_state_ids() - current_state_ids = dict(current_state_ids) # type: ignore - - current_state_ids.update(state_updates) - - prev_state_ids = await context.get_prev_state_ids() - prev_state_ids = dict(prev_state_ids) - - prev_state_ids.update({k: a.event_id for k, a in auth_events.items()}) - - # create a new state group as a delta from the existing one. 
- prev_group = context.state_group - state_group = await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=prev_group, - delta_ids=state_updates, - current_state_ids=current_state_ids, - ) - - return EventContext.with_state( - storage=self._storage_controllers, - state_group=state_group, - state_group_before_event=context.state_group_before_event, - state_delta_due_to_event=state_updates, - prev_group=prev_group, - delta_ids=state_updates, - partial_state=context.partial_state, - ) - async def _run_push_actions_and_persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False ) -> None: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9f0a36652c..dcd272034d 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -24,7 +24,6 @@ from typing import ( DefaultDict, Dict, FrozenSet, - Iterable, List, Mapping, Optional, @@ -422,31 +421,6 @@ class StateHandler: ) return result - async def resolve_events( - self, - room_version: str, - state_sets: Collection[Iterable[EventBase]], - event: EventBase, - ) -> StateMap[EventBase]: - logger.info( - "Resolving state for %s with %d groups", event.room_id, len(state_sets) - ) - state_set_ids = [ - {(ev.type, ev.state_key): ev.event_id for ev in st} for st in state_sets - ] - - state_map = {ev.event_id: ev for st in state_sets for ev in st} - - new_state = await self._state_resolution_handler.resolve_events_with_store( - event.room_id, - room_version, - state_set_ids, - event_map=state_map, - state_res_store=StateResolutionStore(self.store), - ) - - return {key: state_map[ev_id] for key, ev_id in new_state.items()} - async def update_current_state(self, room_id: str) -> None: """Recalculates the current state for a room, and persists it. diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 712933f9ca..8a0bb91f40 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import List, cast +from typing import cast from unittest import TestCase from twisted.test.proto_helpers import MemoryReactor @@ -50,8 +50,6 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main - self.state_storage_controller = hs.get_storage_controllers().state - self._event_auth_handler = hs.get_event_auth_handler() return hs def test_exchange_revoked_invite(self) -> None: @@ -314,142 +312,6 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): ) self.get_success(d) - def test_backfill_floating_outlier_membership_auth(self) -> None: - """ - As the local homeserver, check that we can properly process a federated - event from the OTHER_SERVER with auth_events that include a floating - membership event from the OTHER_SERVER. - - Regression test, see #10439. 
- """ - OTHER_SERVER = "otherserver" - OTHER_USER = "@otheruser:" + OTHER_SERVER - - # create the room - user_id = self.register_user("kermit", "test") - tok = self.login("kermit", "test") - room_id = self.helper.create_room_as( - room_creator=user_id, - is_public=True, - tok=tok, - extra_content={ - "preset": "public_chat", - }, - ) - room_version = self.get_success(self.store.get_room_version(room_id)) - - prev_event_ids = self.get_success(self.store.get_prev_events_for_room(room_id)) - ( - most_recent_prev_event_id, - most_recent_prev_event_depth, - ) = self.get_success(self.store.get_max_depth_of(prev_event_ids)) - # mapping from (type, state_key) -> state_event_id - assert most_recent_prev_event_id is not None - prev_state_map = self.get_success( - self.state_storage_controller.get_state_ids_for_event( - most_recent_prev_event_id - ) - ) - # List of state event ID's - prev_state_ids = list(prev_state_map.values()) - auth_event_ids = prev_state_ids - auth_events = list( - self.get_success(self.store.get_events(auth_event_ids)).values() - ) - - # build a floating outlier member state event - fake_prev_event_id = "$" + random_string(43) - member_event_dict = { - "type": EventTypes.Member, - "content": { - "membership": "join", - }, - "state_key": OTHER_USER, - "room_id": room_id, - "sender": OTHER_USER, - "depth": most_recent_prev_event_depth, - "prev_events": [fake_prev_event_id], - "origin_server_ts": self.clock.time_msec(), - "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, - } - builder = self.hs.get_event_builder_factory().for_room_version( - room_version, member_event_dict - ) - member_event = self.get_success( - builder.build( - prev_event_ids=member_event_dict["prev_events"], - auth_event_ids=self._event_auth_handler.compute_auth_events( - builder, - prev_state_map, - for_verification=False, - ), - depth=member_event_dict["depth"], - ) - ) - # Override the signature added from "test" homeserver that we created the event with - member_event.signatures = member_event_dict["signatures"] - - # Add the new member_event to the StateMap - updated_state_map = dict(prev_state_map) - updated_state_map[ - (member_event.type, member_event.state_key) - ] = member_event.event_id - auth_events.append(member_event) - - # build and send an event authed based on the member event - message_event_dict = { - "type": EventTypes.Message, - "content": {}, - "room_id": room_id, - "sender": OTHER_USER, - "depth": most_recent_prev_event_depth, - "prev_events": prev_event_ids.copy(), - "origin_server_ts": self.clock.time_msec(), - "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, - } - builder = self.hs.get_event_builder_factory().for_room_version( - room_version, message_event_dict - ) - message_event = self.get_success( - builder.build( - prev_event_ids=message_event_dict["prev_events"], - auth_event_ids=self._event_auth_handler.compute_auth_events( - builder, - updated_state_map, - for_verification=False, - ), - depth=message_event_dict["depth"], - ) - ) - # Override the signature added from "test" homeserver that we created the event with - message_event.signatures = message_event_dict["signatures"] - - # Stub the /event_auth response from the OTHER_SERVER - async def get_event_auth( - destination: str, room_id: str, event_id: str - ) -> List[EventBase]: - return [ - event_from_pdu_json(ae.get_pdu_json(), room_version=room_version) - for ae in auth_events - ] - - self.handler.federation_client.get_event_auth = get_event_auth # type: ignore[assignment] 
- - with LoggingContext("receive_pdu"): - # Fake the OTHER_SERVER federating the message event over to our local homeserver - d = run_in_background( - self.hs.get_federation_event_handler().on_receive_pdu, - OTHER_SERVER, - message_event, - ) - self.get_success(d) - - # Now try and get the events on our local homeserver - stored_event = self.get_success( - self.store.get_event(message_event.event_id, allow_none=True) - ) - self.assertTrue(stored_event is not None) - @unittest.override_config( {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}} ) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 5eb0f243f7..9a48e9286f 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -21,7 +21,6 @@ from synapse.api.constants import EventTypes, LoginType, Membership from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase -from synapse.events.snapshot import EventContext from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin from synapse.rest.client import account, login, profile, room @@ -113,14 +112,8 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Have this homeserver skip event auth checks. This is necessary due to # event auth checks ensuring that events were signed by the sender's homeserver. - async def _check_event_auth( - origin: str, - event: EventBase, - context: EventContext, - *args: Any, - **kwargs: Any, - ) -> EventContext: - return context + async def _check_event_auth(origin: Any, event: Any, context: Any) -> None: + pass hs.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] diff --git a/tests/test_federation.py b/tests/test_federation.py index 0cbef70bfa..779fad1f63 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -81,12 +81,8 @@ class MessageAcceptTests(unittest.HomeserverTestCase): self.handler = self.homeserver.get_federation_handler() federation_event_handler = self.homeserver.get_federation_event_handler() - async def _check_event_auth( - origin, - event, - context, - ): - return context + async def _check_event_auth(origin, event, context): + pass federation_event_handler._check_event_auth = _check_event_auth self.client = self.homeserver.get_federation_client() -- cgit 1.4.1 From 21eeacc99551febcddcef21db96a2bd82166fc7e Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 15 Jul 2022 10:36:56 +0200 Subject: Federation Sender & Appservice Pusher Stream Optimisations (#13251) * Replace `get_new_events_for_appservice` with `get_all_new_events_stream` The functions were near identical and this brings the AS worker closer to the way federation senders work which can allow for multiple workers to handle AS traffic. * Pull received TS alongside events when processing the stream This avoids an extra query -per event- when both federation sender and appservice pusher process events. 
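In outline, the second change returns each event's received_ts from the same query that streams the events, so per-event lag metrics no longer need a separate get_received_ts database call. A toy sketch of the new return shape (pure Python over pre-fetched rows; any name not visible in the diff below is an assumption):

    import time
    from typing import Dict, List, Optional, Tuple

    Row = Tuple[int, str, Optional[int]]  # (stream_ordering, event_id, received_ts)

    def shape_stream_batch(
        rows: List[Row], current_id: int, limit: int
    ) -> Tuple[int, List[str], Dict[str, Optional[int]]]:
        """Return (next_token, event_ids, event_to_received_ts) for a batch."""
        # If we filled the batch, resume from the last row next time;
        # otherwise we have caught up to current_id.
        upper_bound = rows[-1][0] if len(rows) == limit else current_id
        event_to_received_ts = {event_id: ts for _, event_id, ts in rows}
        return upper_bound, [r[1] for r in rows], event_to_received_ts

    def processing_lag_ms(event_id: str, ts_map: Dict[str, Optional[int]]) -> Optional[int]:
        # One dict lookup instead of one DB query per event.
        ts = ts_map[event_id]
        return None if ts is None else int(time.time() * 1000) - ts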
--- changelog.d/13251.misc | 1 + synapse/federation/sender/__init__.py | 10 +++-- synapse/handlers/appservice.py | 11 ++--- synapse/storage/databases/main/appservice.py | 58 ++++++++----------------- synapse/storage/databases/main/events_worker.py | 19 -------- synapse/storage/databases/main/stream.py | 32 +++++++++----- tests/handlers/test_appservice.py | 16 +++---- 7 files changed, 60 insertions(+), 87 deletions(-) create mode 100644 changelog.d/13251.misc diff --git a/changelog.d/13251.misc b/changelog.d/13251.misc new file mode 100644 index 0000000000..526369e403 --- /dev/null +++ b/changelog.d/13251.misc @@ -0,0 +1 @@ +Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 99a794c042..94a65ac65f 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -351,7 +351,11 @@ class FederationSender(AbstractFederationSender): self._is_processing = True while True: last_token = await self.store.get_federation_out_pos("events") - next_token, events = await self.store.get_all_new_events_stream( + ( + next_token, + events, + event_to_received_ts, + ) = await self.store.get_all_new_events_stream( last_token, self._last_poked_id, limit=100 ) @@ -476,7 +480,7 @@ class FederationSender(AbstractFederationSender): await self._send_pdu(event, sharded_destinations) now = self.clock.time_msec() - ts = await self.store.get_received_ts(event.event_id) + ts = event_to_received_ts[event.event_id] assert ts is not None synapse.metrics.event_processing_lag_by_event.labels( "federation_sender" @@ -509,7 +513,7 @@ class FederationSender(AbstractFederationSender): if events: now = self.clock.time_msec() - ts = await self.store.get_received_ts(events[-1].event_id) + ts = event_to_received_ts[events[-1].event_id] assert ts is not None synapse.metrics.event_processing_lag.labels( diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 814553e098..203b62e015 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -104,14 +104,15 @@ class ApplicationServicesHandler: with Measure(self.clock, "notify_interested_services"): self.is_processing = True try: - limit = 100 upper_bound = -1 while upper_bound < self.current_max: + last_token = await self.store.get_appservice_last_pos() ( upper_bound, events, - ) = await self.store.get_new_events_for_appservice( - self.current_max, limit + event_to_received_ts, + ) = await self.store.get_all_new_events_stream( + last_token, self.current_max, limit=100, get_prev_content=True ) events_by_room: Dict[str, List[EventBase]] = {} @@ -150,7 +151,7 @@ class ApplicationServicesHandler: ) now = self.clock.time_msec() - ts = await self.store.get_received_ts(event.event_id) + ts = event_to_received_ts[event.event_id] assert ts is not None synapse.metrics.event_processing_lag_by_event.labels( @@ -187,7 +188,7 @@ class ApplicationServicesHandler: if events: now = self.clock.time_msec() - ts = await self.store.get_received_ts(events[-1].event_id) + ts = event_to_received_ts[events[-1].event_id] assert ts is not None synapse.metrics.event_processing_lag.labels( diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index e284454b66..64b70a7b28 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -371,52 +371,30 @@ class 
ApplicationServiceTransactionWorkerStore( device_list_summary=DeviceListUpdates(), ) - async def set_appservice_last_pos(self, pos: int) -> None: - def set_appservice_last_pos_txn(txn: LoggingTransaction) -> None: - txn.execute( - "UPDATE appservice_stream_position SET stream_ordering = ?", (pos,) - ) + async def get_appservice_last_pos(self) -> int: + """ + Get the last stream ordering position for the appservice process. + """ - await self.db_pool.runInteraction( - "set_appservice_last_pos", set_appservice_last_pos_txn + return await self.db_pool.simple_select_one_onecol( + table="appservice_stream_position", + retcol="stream_ordering", + keyvalues={}, + desc="get_appservice_last_pos", ) - async def get_new_events_for_appservice( - self, current_id: int, limit: int - ) -> Tuple[int, List[EventBase]]: - """Get all new events for an appservice""" - - def get_new_events_for_appservice_txn( - txn: LoggingTransaction, - ) -> Tuple[int, List[str]]: - sql = ( - "SELECT e.stream_ordering, e.event_id" - " FROM events AS e" - " WHERE" - " (SELECT stream_ordering FROM appservice_stream_position)" - " < e.stream_ordering" - " AND e.stream_ordering <= ?" - " ORDER BY e.stream_ordering ASC" - " LIMIT ?" - ) - - txn.execute(sql, (current_id, limit)) - rows = txn.fetchall() - - upper_bound = current_id - if len(rows) == limit: - upper_bound = rows[-1][0] - - return upper_bound, [row[1] for row in rows] + async def set_appservice_last_pos(self, pos: int) -> None: + """ + Set the last stream ordering position for the appservice process. + """ - upper_bound, event_ids = await self.db_pool.runInteraction( - "get_new_events_for_appservice", get_new_events_for_appservice_txn + await self.db_pool.simple_update_one( + table="appservice_stream_position", + keyvalues={}, + updatevalues={"stream_ordering": pos}, + desc="set_appservice_last_pos", ) - events = await self.get_events_as_list(event_ids, get_prev_content=True) - - return upper_bound, events - async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b99b107784..621f92e238 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -292,25 +292,6 @@ class EventsWorkerStore(SQLBaseStore): super().process_replication_rows(stream_name, instance_name, token, rows) - async def get_received_ts(self, event_id: str) -> Optional[int]: - """Get received_ts (when it was persisted) for the event. - - Raises an exception for unknown events. - - Args: - event_id: The event ID to query. - - Returns: - Timestamp in milliseconds, or None for events that were persisted - before received_ts was implemented. - """ - return await self.db_pool.simple_select_one_onecol( - table="events", - keyvalues={"event_id": event_id}, - retcol="received_ts", - desc="get_received_ts", - ) - async def have_censored_event(self, event_id: str) -> bool: """Check if an event has been censored, i.e. if the content of the event has been erased from the database due to a redaction. 
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 3a1df7776c..2590b52f73 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1022,8 +1022,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): } async def get_all_new_events_stream( - self, from_id: int, current_id: int, limit: int - ) -> Tuple[int, List[EventBase]]: + self, from_id: int, current_id: int, limit: int, get_prev_content: bool = False + ) -> Tuple[int, List[EventBase], Dict[str, Optional[int]]]: """Get all new events Returns all events with from_id < stream_ordering <= current_id. @@ -1032,19 +1032,21 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): from_id: the stream_ordering of the last event we processed current_id: the stream_ordering of the most recently processed event limit: the maximum number of events to return + get_prev_content: whether to fetch previous event content Returns: - A tuple of (next_id, events), where `next_id` is the next value to - pass as `from_id` (it will either be the stream_ordering of the - last returned event, or, if fewer than `limit` events were found, - the `current_id`). + A tuple of (next_id, events, event_to_received_ts), where `next_id` + is the next value to pass as `from_id` (it will either be the + stream_ordering of the last returned event, or, if fewer than `limit` + events were found, the `current_id`). The `event_to_received_ts` is + a dictionary mapping event ID to the event `received_ts`. """ def get_all_new_events_stream_txn( txn: LoggingTransaction, - ) -> Tuple[int, List[str]]: + ) -> Tuple[int, Dict[str, Optional[int]]]: sql = ( - "SELECT e.stream_ordering, e.event_id" + "SELECT e.stream_ordering, e.event_id, e.received_ts" " FROM events AS e" " WHERE" " ? < e.stream_ordering AND e.stream_ordering <= ?" 
@@ -1059,15 +1061,21 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if len(rows) == limit: upper_bound = rows[-1][0] - return upper_bound, [row[1] for row in rows] + event_to_received_ts: Dict[str, Optional[int]] = { + row[1]: row[2] for row in rows + } + return upper_bound, event_to_received_ts - upper_bound, event_ids = await self.db_pool.runInteraction( + upper_bound, event_to_received_ts = await self.db_pool.runInteraction( "get_all_new_events_stream", get_all_new_events_stream_txn ) - events = await self.get_events_as_list(event_ids) + events = await self.get_events_as_list( + event_to_received_ts.keys(), + get_prev_content=get_prev_content, + ) - return upper_bound, events + return upper_bound, events, event_to_received_ts async def get_federation_out_pos(self, typ: str) -> int: if self._need_to_reset_federation_stream_positions: diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index d96d5aa138..b17af2725b 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -50,7 +50,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.mock_scheduler = Mock() hs = Mock() hs.get_datastores.return_value = Mock(main=self.mock_store) - self.mock_store.get_received_ts.return_value = make_awaitable(0) + self.mock_store.get_appservice_last_pos.return_value = make_awaitable(None) self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None) self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable( None @@ -76,9 +76,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): event = Mock( sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar" ) - self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [])), - make_awaitable((1, [event])), + self.mock_store.get_all_new_events_stream.side_effect = [ + make_awaitable((0, [], {})), + make_awaitable((1, [event], {event.event_id: 0})), ] self.handler.notify_interested_services(RoomStreamToken(None, 1)) @@ -95,8 +95,8 @@ class AppServiceHandlerTestCase(unittest.TestCase): event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar") self.mock_as_api.query_user.return_value = make_awaitable(True) - self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [event])), + self.mock_store.get_all_new_events_stream.side_effect = [ + make_awaitable((0, [event], {event.event_id: 0})), ] self.handler.notify_interested_services(RoomStreamToken(None, 0)) @@ -112,8 +112,8 @@ class AppServiceHandlerTestCase(unittest.TestCase): event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar") self.mock_as_api.query_user.return_value = make_awaitable(True) - self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [event])), + self.mock_store.get_all_new_events_stream.side_effect = [ + make_awaitable((0, [event], {event.event_id: 0})), ] self.handler.notify_interested_services(RoomStreamToken(None, 0)) -- cgit 1.4.1 From cc21a431f3bdb353427c3242e49b1941a51175b3 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 15 Jul 2022 11:30:46 +0200 Subject: Async get event cache prep (#13242) Some experimental prep work to enable external event caching based on #9379 & #12955. Doesn't actually move the cache at all, just lays the groundwork for async implemented caches. 
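In outline, the new wrapper simply forwards to an in-memory LruCache behind async methods, so call sites gain an await point that a future subclass can use to consult an external cache such as Redis. A toy version (a plain dict standing in for the real LruCache; not Synapse's actual class):

    from typing import Dict, Generic, Optional, TypeVar

    KT = TypeVar("KT")
    VT = TypeVar("VT")

    class ToyAsyncCache(Generic[KT, VT]):
        """Async facade over a local dict; a subclass could consult Redis first."""

        def __init__(self) -> None:
            self._local: Dict[KT, VT] = {}

        async def get(self, key: KT) -> Optional[VT]:
            # An external-cache subclass would check the remote cache here
            # before falling back to the local one.
            return self._local.get(key)

        async def set(self, key: KT, value: VT) -> None:
            self._local[key] = value

        async def invalidate(self, key: KT) -> None:
            # Invalidate any external copy before the local one, so a stale
            # remote value can never repopulate the local cache.
            self.invalidate_local(key)

        def invalidate_local(self, key: KT) -> None:
            self._local.pop(key, None)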
Signed off by Nick @ Beeper (@Fizzadar) --- changelog.d/13242.misc | 1 + synapse/storage/database.py | 10 +++--- synapse/storage/databases/main/cache.py | 7 ++-- synapse/storage/databases/main/events.py | 4 +-- synapse/storage/databases/main/events_worker.py | 34 +++++++++++++------ synapse/storage/databases/main/purge_events.py | 2 +- synapse/storage/databases/main/roommember.py | 4 ++- synapse/util/caches/lrucache.py | 38 ++++++++++++++++++++++ tests/handlers/test_sync.py | 2 +- tests/storage/databases/main/test_events_worker.py | 8 ++--- tests/storage/test_purge.py | 2 +- 11 files changed, 86 insertions(+), 26 deletions(-) create mode 100644 changelog.d/13242.misc diff --git a/changelog.d/13242.misc b/changelog.d/13242.misc new file mode 100644 index 0000000000..7f8ec0815f --- /dev/null +++ b/changelog.d/13242.misc @@ -0,0 +1 @@ +Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/storage/database.py b/synapse/storage/database.py index e21ab08515..6a6d0dcd73 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -57,7 +57,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor -from synapse.util.async_helpers import delay_cancellation +from synapse.util.async_helpers import delay_cancellation, maybe_awaitable from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -818,12 +818,14 @@ class DatabasePool: ) for after_callback, after_args, after_kwargs in after_callbacks: - after_callback(*after_args, **after_kwargs) + await maybe_awaitable(after_callback(*after_args, **after_kwargs)) return cast(R, result) except Exception: - for after_callback, after_args, after_kwargs in exception_callbacks: - after_callback(*after_args, **after_kwargs) + for exception_callback, after_args, after_kwargs in exception_callbacks: + await maybe_awaitable( + exception_callback(*after_args, **after_kwargs) + ) raise # To handle cancellation, we ensure that `after_callback`s and diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 1653a6a9b6..2367ddeea3 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -193,7 +193,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore): relates_to: Optional[str], backfilled: bool, ) -> None: - self._invalidate_get_event_cache(event_id) + # This invalidates any local in-memory cached event objects, the original + # process triggering the invalidation is responsible for clearing any external + # cached objects. + self._invalidate_local_get_event_cache(event_id) self.have_seen_event.invalidate((room_id, event_id)) self.get_latest_event_ids_in_room.invalidate((room_id,)) @@ -208,7 +211,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._events_stream_cache.entity_has_changed(room_id, stream_ordering) if redacts: - self._invalidate_get_event_cache(redacts) + self._invalidate_local_get_event_cache(redacts) # Caches which might leak edits must be invalidated for the event being # redacted. 
self.get_relations_for_event.invalidate((redacts,)) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index eb4efbb93c..fa2266ba20 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1669,9 +1669,9 @@ class PersistEventsStore: if not row["rejects"] and not row["redacts"]: to_prefill.append(EventCacheEntry(event=event, redacted_event=None)) - def prefill() -> None: + async def prefill() -> None: for cache_entry in to_prefill: - self.store._get_event_cache.set( + await self.store._get_event_cache.set( (cache_entry.event.event_id,), cache_entry ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 621f92e238..f3935bfead 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -79,7 +79,7 @@ from synapse.types import JsonDict, get_domain_from_id from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred, delay_cancellation from synapse.util.caches.descriptors import cached, cachedList -from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.lrucache import AsyncLruCache from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -238,7 +238,9 @@ class EventsWorkerStore(SQLBaseStore): 5 * 60 * 1000, ) - self._get_event_cache: LruCache[Tuple[str], EventCacheEntry] = LruCache( + self._get_event_cache: AsyncLruCache[ + Tuple[str], EventCacheEntry + ] = AsyncLruCache( cache_name="*getEvent*", max_size=hs.config.caches.event_cache_size, ) @@ -598,7 +600,7 @@ class EventsWorkerStore(SQLBaseStore): Returns: map from event id to result """ - event_entry_map = self._get_events_from_cache( + event_entry_map = await self._get_events_from_cache( event_ids, ) @@ -710,12 +712,22 @@ class EventsWorkerStore(SQLBaseStore): return event_entry_map - def _invalidate_get_event_cache(self, event_id: str) -> None: - self._get_event_cache.invalidate((event_id,)) + async def _invalidate_get_event_cache(self, event_id: str) -> None: + # First we invalidate the asynchronous cache instance. This may include + # out-of-process caches such as Redis/memcache. Once complete we can + # invalidate any in memory cache. The ordering is important here to + # ensure we don't pull in any remote invalid value after we invalidate + # the in-memory cache. + await self._get_event_cache.invalidate((event_id,)) self._event_ref.pop(event_id, None) self._current_event_fetches.pop(event_id, None) - def _get_events_from_cache( + def _invalidate_local_get_event_cache(self, event_id: str) -> None: + self._get_event_cache.invalidate_local((event_id,)) + self._event_ref.pop(event_id, None) + self._current_event_fetches.pop(event_id, None) + + async def _get_events_from_cache( self, events: Iterable[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: """Fetch events from the caches. @@ -730,7 +742,7 @@ class EventsWorkerStore(SQLBaseStore): for event_id in events: # First check if it's in the event cache - ret = self._get_event_cache.get( + ret = await self._get_event_cache.get( (event_id,), None, update_metrics=update_metrics ) if ret: @@ -752,7 +764,7 @@ class EventsWorkerStore(SQLBaseStore): # We add the entry back into the cache as we want to keep # recently queried events in the cache. 
- self._get_event_cache.set((event_id,), cache_entry) + await self._get_event_cache.set((event_id,), cache_entry) return event_map @@ -1129,7 +1141,7 @@ class EventsWorkerStore(SQLBaseStore): event=original_ev, redacted_event=redacted_event ) - self._get_event_cache.set((event_id,), cache_entry) + await self._get_event_cache.set((event_id,), cache_entry) result_map[event_id] = cache_entry if not redacted_event: @@ -1363,7 +1375,9 @@ class EventsWorkerStore(SQLBaseStore): # if the event cache contains the event, obviously we've seen it. cache_results = { - (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,)) + (rid, eid) + for (rid, eid) in keys + if await self._get_event_cache.contains((eid,)) } results = dict.fromkeys(cache_results, True) remaining = [k for k in keys if k not in cache_results] diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 87b0d09039..549ce07c16 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -302,7 +302,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): self._invalidate_cache_and_stream( txn, self.have_seen_event, (room_id, event_id) ) - self._invalidate_get_event_cache(event_id) + txn.call_after(self._invalidate_get_event_cache, event_id) logger.info("[purge] done") diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 71a65d565a..105a518677 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -843,7 +843,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): # We don't update the event cache hit ratio as it completely throws off # the hit ratio counts. After all, we don't populate the cache if we # miss it here - event_map = self._get_events_from_cache(member_event_ids, update_metrics=False) + event_map = await self._get_events_from_cache( + member_event_ids, update_metrics=False + ) missing_member_event_ids = [] for event_id in member_event_ids: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 8ed5325c5d..31f41fec82 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -730,3 +730,41 @@ class LruCache(Generic[KT, VT]): # This happens e.g. in the sync code where we have an expiring cache of # lru caches. self.clear() + + +class AsyncLruCache(Generic[KT, VT]): + """ + An asynchronous wrapper around a subset of the LruCache API. + + On its own this doesn't change the behaviour but allows subclasses that + utilize external cache systems that require await behaviour to be created. + """ + + def __init__(self, *args, **kwargs): # type: ignore + self._lru_cache: LruCache[KT, VT] = LruCache(*args, **kwargs) + + async def get( + self, key: KT, default: Optional[T] = None, update_metrics: bool = True + ) -> Optional[VT]: + return self._lru_cache.get(key, update_metrics=update_metrics) + + async def set(self, key: KT, value: VT) -> None: + self._lru_cache.set(key, value) + + async def invalidate(self, key: KT) -> None: + # This method should invalidate any external cache and then invalidate the LruCache. + return self._lru_cache.invalidate(key) + + def invalidate_local(self, key: KT) -> None: + """Remove an entry from the local cache + + This variant of `invalidate` is useful if we know that the external + cache has already been invalidated. 
+ """ + return self._lru_cache.invalidate(key) + + async def contains(self, key: KT) -> bool: + return self._lru_cache.contains(key) + + async def clear(self) -> None: + self._lru_cache.clear() diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index ecc7cc6461..e3f38fbcc5 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -159,7 +159,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) self.store._event_ref.clear() # The rooms should be excluded from the sync response. diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 38963ce4a7..46d829b062 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -143,7 +143,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) def test_simple(self): """Test that we cache events that we pull from the DB.""" @@ -160,7 +160,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): """ # Reset the event cache - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) with LoggingContext("test") as ctx: # We keep hold of the event event though we never use it. @@ -170,7 +170,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) # Reset the event cache - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) with LoggingContext("test") as ctx: self.get_success(self.store.get_event(self.event_id)) @@ -345,7 +345,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) @contextmanager def blocking_get_event_calls( diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 8dfaa0559b..9c1182ed16 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -115,6 +115,6 @@ class PurgeTests(HomeserverTestCase): ) # The events aren't found. - self.store._invalidate_get_event_cache(create_event.event_id) + self.store._invalidate_local_get_event_cache(create_event.event_id) self.get_failure(self.store.get_event(create_event.event_id), NotFoundError) self.get_failure(self.store.get_event(first["event_id"]), NotFoundError) -- cgit 1.4.1 From 512486bbeb606ae989fe26d7b39074096a70b32c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 15 Jul 2022 11:13:40 +0100 Subject: Docker: copy postgres from base image (#13279) When building the docker images for complement testing, copy a preinstalled complement over from a base image, rather than apt installing it. This avoids network traffic and is much faster. 
--- changelog.d/13279.misc | 1 + docker/complement/Dockerfile | 82 ++++++++++++++---------- docker/complement/conf/postgres.supervisord.conf | 2 +- 3 files changed, 51 insertions(+), 34 deletions(-) create mode 100644 changelog.d/13279.misc diff --git a/changelog.d/13279.misc b/changelog.d/13279.misc new file mode 100644 index 0000000000..a083d2af2a --- /dev/null +++ b/changelog.d/13279.misc @@ -0,0 +1 @@ +Reduce the rebuild time for the complement-synapse docker image. diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 8bec0f6116..c5e7984a28 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -4,42 +4,58 @@ # # Instructions for building this image from those it depends on is detailed in this guide: # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse -ARG SYNAPSE_VERSION=latest -FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION - -# Install postgresql -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -yqq postgresql-13 - -# Configure a user and create a database for Synapse -RUN pg_ctlcluster 13 main start && su postgres -c "echo \ - \"ALTER USER postgres PASSWORD 'somesecret'; \ - CREATE DATABASE synapse \ - ENCODING 'UTF8' \ - LC_COLLATE='C' \ - LC_CTYPE='C' \ - template=template0;\" | psql" && pg_ctlcluster 13 main stop -# Extend the shared homeserver config to disable rate-limiting, -# set Complement's static shared secret, enable registration, amongst other -# tweaks to get Synapse ready for testing. -# To do this, we copy the old template out of the way and then include it -# with Jinja2. -RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 -COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 - -WORKDIR /data +ARG SYNAPSE_VERSION=latest -COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf +# first of all, we create a base image with a postgres server and database, +# which we can copy into the target image. For repeated rebuilds, this is +# much faster than apt installing postgres each time. +# +# This trick only works because (a) the Synapse image happens to have all the +# shared libraries that postgres wants, (b) we use a postgres image based on +# the same debian version as Synapse's docker image (so the versions of the +# shared libraries match). -# Copy the entrypoint -COPY conf/start_for_complement.sh / +FROM postgres:13-bullseye AS postgres_base + # initialise the database cluster in /var/lib/postgresql + RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password -# Expose nginx's listener ports -EXPOSE 8008 8448 + # Configure a password and create a database for Synapse + RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single + RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single -ENTRYPOINT ["/start_for_complement.sh"] +# now build the final image, based on the Synapse image. 
-# Update the healthcheck to have a shorter check interval -HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD /bin/sh /healthcheck.sh +FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION + # copy the postgres installation over from the image we built above + RUN adduser --system --uid 999 postgres --home /var/lib/postgresql + COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql + COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql + COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql + RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql + ENV PATH="${PATH}:/usr/lib/postgresql/13/bin" + ENV PGDATA=/var/lib/postgresql/data + + # Extend the shared homeserver config to disable rate-limiting, + # set Complement's static shared secret, enable registration, amongst other + # tweaks to get Synapse ready for testing. + # To do this, we copy the old template out of the way and then include it + # with Jinja2. + RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 + COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 + + WORKDIR /data + + COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf + + # Copy the entrypoint + COPY conf/start_for_complement.sh / + + # Expose nginx's listener ports + EXPOSE 8008 8448 + + ENTRYPOINT ["/start_for_complement.sh"] + + # Update the healthcheck to have a shorter check interval + HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ + CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/conf/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf index 5dae3e6330..b88bfc772e 100644 --- a/docker/complement/conf/postgres.supervisord.conf +++ b/docker/complement/conf/postgres.supervisord.conf @@ -1,5 +1,5 @@ [program:postgres] -command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground +command=/usr/local/bin/prefix-log gosu postgres postgres # Only start if START_POSTGRES=1 autostart=%(ENV_START_POSTGRES)s -- cgit 1.4.1 From 7be954f59b4a8c98752e72c628c853d448b746ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Jul 2022 12:06:41 +0100 Subject: Fix a bug which could lead to incorrect state (#13278) There are two fixes here: 1. A long-standing bug where we incorrectly calculated `delta_ids`; and 2. A bug introduced in #13267 where we got current state incorrect. --- changelog.d/13278.bugfix | 1 + synapse/state/__init__.py | 19 ++++++++---- synapse/storage/controllers/persist_events.py | 3 +- tests/test_state.py | 42 ++++++++++++++++++++++++++- 4 files changed, 58 insertions(+), 7 deletions(-) create mode 100644 changelog.d/13278.bugfix diff --git a/changelog.d/13278.bugfix b/changelog.d/13278.bugfix new file mode 100644 index 0000000000..49e9377c79 --- /dev/null +++ b/changelog.d/13278.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where in rare instances Synapse could store the incorrect state for a room after a state resolution. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index dcd272034d..3a65bd0849 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -83,7 +83,7 @@ def _gen_state_id() -> str: class _StateCacheEntry: - __slots__ = ["state", "state_group", "prev_group", "delta_ids"] + __slots__ = ["_state", "state_group", "prev_group", "delta_ids"] def __init__( self, @@ -96,7 +96,10 @@ class _StateCacheEntry: raise Exception("Either state or state group must be not None") # A map from (type, state_key) to event_id. 
- self.state = frozendict(state) if state is not None else None + # + # This can be None if we have a `state_group` (as then we can fetch the + # state from the DB.) + self._state = frozendict(state) if state is not None else None # the ID of a state group if one and only one is involved. # otherwise, None otherwise? @@ -114,8 +117,8 @@ class _StateCacheEntry: looking up the state group in the DB. """ - if self.state is not None: - return self.state + if self._state is not None: + return self._state assert self.state_group is not None @@ -128,7 +131,7 @@ class _StateCacheEntry: # cache eviction purposes. This is why if `self.state` is None it's fine # to return 1. - return len(self.state) if self.state else 1 + return len(self._state) if self._state else 1 class StateHandler: @@ -743,6 +746,12 @@ def _make_state_cache_entry( delta_ids: Optional[StateMap[str]] = None for old_group, old_state in state_groups_ids.items(): + if old_state.keys() - new_state.keys(): + # Currently we don't support deltas that remove keys from the state + # map, so we have to ignore this group as a candidate to base the + # new group on. + continue + n_delta_ids = {k: v for k, v in new_state.items() if old_state.get(k) != v} if not delta_ids or len(n_delta_ids) < len(delta_ids): prev_group = old_group diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index af65e5913b..cf98b0ab48 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -948,7 +948,8 @@ class EventsPersistenceStorageController: events_context, ) - return res.state, None, new_latest_event_ids + full_state = await res.get_state(self._state_controller) + return full_state, None, new_latest_event_ids async def _prune_extremities( self, diff --git a/tests/test_state.py b/tests/test_state.py index 6ca8d8f21d..e2c0013671 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -21,7 +21,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext -from synapse.state import StateHandler, StateResolutionHandler +from synapse.state import StateHandler, StateResolutionHandler, _make_state_cache_entry from synapse.util import Clock from synapse.util.macaroons import MacaroonGenerator @@ -760,3 +760,43 @@ class StateTestCase(unittest.TestCase): result = yield defer.ensureDeferred(self.state.compute_event_context(event)) return result + + def test_make_state_cache_entry(self): + "Test that calculating a prev_group and delta is correct" + + new_state = { + ("a", ""): "E", + ("b", ""): "E", + ("c", ""): "E", + ("d", ""): "E", + } + + # old_state_1 has fewer differences to new_state than old_state_2, but + # the delta involves deleting a key, which isn't allowed in the deltas, + # so we should pick old_state_2 as the prev_group. 
+ + # `old_state_1` has two differences: `a` and `e` + old_state_1 = { + ("a", ""): "F", + ("b", ""): "E", + ("c", ""): "E", + ("d", ""): "E", + ("e", ""): "E", + } + + # `old_state_2` has three differences: `a`, `c` and `d` + old_state_2 = { + ("a", ""): "F", + ("b", ""): "E", + ("c", ""): "F", + ("d", ""): "F", + } + + entry = _make_state_cache_entry(new_state, {1: old_state_1, 2: old_state_2}) + + self.assertEqual(entry.prev_group, 2) + + # There are three changes from `old_state_2` to `new_state` + self.assertEqual( + entry.delta_ids, {("a", ""): "E", ("c", ""): "E", ("d", ""): "E"} + ) -- cgit 1.4.1 From b116d3ce00fa64c665c189e3fcc1cf7b41d2b115 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 15 Jul 2022 12:47:26 +0100 Subject: Bg update to populate new `events` table columns (#13215) These columns were added back in Synapse 1.52, and have been populated for new events since then. It's now (beyond) time to back-populate them for existing events. --- changelog.d/13215.misc | 1 + .../storage/databases/main/events_bg_updates.py | 87 ++++++++++++++++++++++ .../main/delta/72/03bg_populate_events_columns.py | 47 ++++++++++++ 3 files changed, 135 insertions(+) create mode 100644 changelog.d/13215.misc create mode 100644 synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py diff --git a/changelog.d/13215.misc b/changelog.d/13215.misc new file mode 100644 index 0000000000..3da35addb3 --- /dev/null +++ b/changelog.d/13215.misc @@ -0,0 +1 @@ +Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index eeca85fc94..6e8aeed7b4 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -67,6 +67,8 @@ class _BackgroundUpdates: EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" + EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" + @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -253,6 +255,11 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): replaces_index="ev_edges_id", ) + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS, + self._background_events_populate_state_key_rejections, + ) + async def _background_reindex_fields_sender( self, progress: JsonDict, batch_size: int ) -> int: @@ -1399,3 +1406,83 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): ) return batch_size + + async def _background_events_populate_state_key_rejections( + self, progress: JsonDict, batch_size: int + ) -> int: + """Back-populate `events.state_key` and `events.rejection_reason""" + + min_stream_ordering_exclusive = progress["min_stream_ordering_exclusive"] + max_stream_ordering_inclusive = progress["max_stream_ordering_inclusive"] + + def _populate_txn(txn: LoggingTransaction) -> bool: + """Returns True if we're done.""" + + # first we need to find an endpoint. + # we need to find the final row in the batch of batch_size, which means + # we need to skip over (batch_size-1) rows and get the next row. + txn.execute( + """ + SELECT stream_ordering FROM events + WHERE stream_ordering > ? AND stream_ordering <= ? + ORDER BY stream_ordering + LIMIT 1 OFFSET ? 
+ """, + ( + min_stream_ordering_exclusive, + max_stream_ordering_inclusive, + batch_size - 1, + ), + ) + + endpoint = None + row = txn.fetchone() + if row: + endpoint = row[0] + + where_clause = "stream_ordering > ?" + args = [min_stream_ordering_exclusive] + if endpoint: + where_clause += " AND stream_ordering <= ?" + args.append(endpoint) + + # now do the updates. + txn.execute( + f""" + UPDATE events + SET state_key = (SELECT state_key FROM state_events se WHERE se.event_id = events.event_id), + rejection_reason = (SELECT reason FROM rejections rej WHERE rej.event_id = events.event_id) + WHERE ({where_clause}) + """, + args, + ) + + logger.info( + "populated new `events` columns up to %s/%i: updated %i rows", + endpoint, + max_stream_ordering_inclusive, + txn.rowcount, + ) + + if endpoint is None: + # we're done + return True + + progress["min_stream_ordering_exclusive"] = endpoint + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS, + progress, + ) + return False + + done = await self.db_pool.runInteraction( + desc="events_populate_state_key_rejections", func=_populate_txn + ) + + if done: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS + ) + + return batch_size diff --git a/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py new file mode 100644 index 0000000000..55a5d092cc --- /dev/null +++ b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py @@ -0,0 +1,47 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from synapse.storage.types import Cursor + + +def run_create(cur: Cursor, database_engine, *args, **kwargs): + """Add a bg update to populate the `state_key` and `rejection_reason` columns of `events`""" + + # we know that any new events will have the columns populated (and that has been + # the case since schema_version 68, so there is no chance of rolling back now). + # + # So, we only need to make sure that existing rows are updated. We read the + # current min and max stream orderings, since that is guaranteed to include all + # the events that were stored before the new columns were added. + cur.execute("SELECT MIN(stream_ordering), MAX(stream_ordering) FROM events") + (min_stream_ordering, max_stream_ordering) = cur.fetchone() + + if min_stream_ordering is None: + # no rows, nothing to do. 
+ return + + cur.execute( + "INSERT into background_updates (ordering, update_name, progress_json)" + " VALUES (7203, 'events_populate_state_key_rejections', ?)", + ( + json.dumps( + { + "min_stream_ordering_exclusive": min_stream_ordering - 1, + "max_stream_ordering_inclusive": max_stream_ordering, + } + ), + ), + ) -- cgit 1.4.1 From d765ada84f42d57cda2a3b413df160d65fbb8761 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 15 Jul 2022 13:18:51 +0100 Subject: Update locked frozendict version to 2.3.2 (#13284) `frozendict` 2.3.2 includes a fix for a memory leak in `frozendict.__hash__`. This likely has no impact outside of the deprecated `/initialSync` endpoint, which uses `StreamToken`s, containing `RoomStreamToken`s, containing `frozendict`s, as cache keys. Signed-off-by: Sean Quah --- changelog.d/13284.misc | 1 + poetry.lock | 36 ++++++++++++++++++------------------ 2 files changed, 19 insertions(+), 18 deletions(-) create mode 100644 changelog.d/13284.misc diff --git a/changelog.d/13284.misc b/changelog.d/13284.misc new file mode 100644 index 0000000000..fa9743a10e --- /dev/null +++ b/changelog.d/13284.misc @@ -0,0 +1 @@ +Update locked version of `frozendict` to 2.3.2, which has a fix for a memory leak. diff --git a/poetry.lock b/poetry.lock index b7c0a6869a..3a08c9478d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -290,7 +290,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "frozendict" -version = "2.3.0" +version = "2.3.2" description = "A simple immutable dictionary" category = "main" optional = false @@ -1753,23 +1753,23 @@ flake8-comprehensions = [ {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"}, ] frozendict = [ - {file = "frozendict-2.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e18e2abd144a9433b0a8334582843b2aa0d3b9ac8b209aaa912ad365115fe2e1"}, - {file = "frozendict-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96dc7a02e78da5725e5e642269bb7ae792e0c9f13f10f2e02689175ebbfedb35"}, - {file = "frozendict-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:752a6dcfaf9bb20a7ecab24980e4dbe041f154509c989207caf185522ef85461"}, - {file = "frozendict-2.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5346d9fc1c936c76d33975a9a9f1a067342963105d9a403a99e787c939cc2bb2"}, - {file = "frozendict-2.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60dd2253f1bacb63a7c486ec541a968af4f985ffb06602ee8954a3d39ec6bd2e"}, - {file = "frozendict-2.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:b2e044602ce17e5cd86724add46660fb9d80169545164e763300a3b839cb1b79"}, - {file = "frozendict-2.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a27a69b1ac3591e4258325108aee62b53c0eeb6ad0a993ae68d3c7eaea980420"}, - {file = "frozendict-2.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f45ef5f6b184d84744fff97b61f6b9a855e24d36b713ea2352fc723a047afa5"}, - {file = "frozendict-2.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2d3f5016650c0e9a192f5024e68fb4d63f670d0ee58b099ed3f5b4c62ea30ecb"}, - {file = "frozendict-2.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6cf605916f50aabaaba5624c81eb270200f6c2c466c46960237a125ec8fe3ae0"}, - {file = "frozendict-2.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6da06e44904beae4412199d7e49be4f85c6cc168ab06b77c735ea7da5ce3454"}, - {file = 
"frozendict-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:1f34793fb409c4fa70ffd25bea87b01f3bd305fb1c6b09e7dff085b126302206"}, - {file = "frozendict-2.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd72494a559bdcd28aa71f4aa81860269cd0b7c45fff3e2614a0a053ecfd2a13"}, - {file = "frozendict-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00ea9166aa68cc5feed05986206fdbf35e838a09cb3feef998cf35978ff8a803"}, - {file = "frozendict-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9ffaf440648b44e0bc694c1a4701801941378ba3ba6541e17750ae4b4aeeb116"}, - {file = "frozendict-2.3.0-py3-none-any.whl", hash = "sha256:8578fe06815fcdcc672bd5603eebc98361a5317c1c3a13b28c6c810f6ea3b323"}, - {file = "frozendict-2.3.0.tar.gz", hash = "sha256:da4231adefc5928e7810da2732269d3ad7b5616295b3e693746392a8205ea0b5"}, + {file = "frozendict-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4fb171d1e84d17335365877e19d17440373b47ca74a73c06f65ac0b16d01e87f"}, + {file = "frozendict-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a3640e9d7533d164160b758351aa49d9e85bbe0bd76d219d4021e90ffa6a52"}, + {file = "frozendict-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:87cfd00fafbc147d8cd2590d1109b7db8ac8d7d5bdaa708ba46caee132b55d4d"}, + {file = "frozendict-2.3.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fb09761e093cfabb2f179dbfdb2521e1ec5701df714d1eb5c51fa7849027be19"}, + {file = "frozendict-2.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82176dc7adf01cf8f0193e909401939415a230a1853f4a672ec1629a06ceae18"}, + {file = "frozendict-2.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:c1c70826aa4a50fa283fe161834ac4a3ac7c753902c980bb8b595b0998a38ddb"}, + {file = "frozendict-2.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1db5035ddbed995badd1a62c4102b5e207b5aeb24472df2c60aba79639d7996b"}, + {file = "frozendict-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4246fc4cb1413645ba4d3513939b90d979a5bae724be605a10b2b26ee12f839c"}, + {file = "frozendict-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:680cd42fb0a255da1ce45678ccbd7f69da750d5243809524ebe8f45b2eda6e6b"}, + {file = "frozendict-2.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a7f3a181d6722c92a9fab12d0c5c2b006a18ca5666098531f316d1e1c8984e3"}, + {file = "frozendict-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1cb866eabb3c1384a7fe88e1e1033e2b6623073589012ab637c552bf03f6364"}, + {file = "frozendict-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:952c5e5e664578c5c2ce8489ee0ab6a1855da02b58ef593ee728fc10d672641a"}, + {file = "frozendict-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:608b77904cd0117cd816df605a80d0043a5326ee62529327d2136c792165a823"}, + {file = "frozendict-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0eed41fd326f0bcc779837d8d9e1374da1bc9857fe3b9f2910195bbd5fff3aeb"}, + {file = "frozendict-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:bde28db6b5868dd3c45b3555f9d1dc5a1cca6d93591502fa5dcecce0dde6a335"}, + {file = "frozendict-2.3.2-py3-none-any.whl", hash = "sha256:6882a9bbe08ab9b5ff96ce11bdff3fe40b114b9813bc6801261e2a7b45e20012"}, + {file = "frozendict-2.3.2.tar.gz", hash = "sha256:7fac4542f0a13fbe704db4942f41ba3abffec5af8b100025973e59dff6a09d0d"}, ] gitdb = [ {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, -- cgit 1.4.1 From 7281591f4cf81db2fa9e00187d9a91179c6e6a98 
Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 15 Jul 2022 13:20:47 +0100 Subject: Use state before join to determine if we `_should_perform_remote_join` (#13270) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/13270.bugfix | 1 + synapse/events/builder.py | 2 +- synapse/handlers/room_member.py | 35 ++++++++++++++++++++--------------- synapse/state/__init__.py | 21 +++++++++++++-------- 4 files changed, 35 insertions(+), 24 deletions(-) create mode 100644 changelog.d/13270.bugfix diff --git a/changelog.d/13270.bugfix b/changelog.d/13270.bugfix new file mode 100644 index 0000000000..d023b25eea --- /dev/null +++ b/changelog.d/13270.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.40 where a user invited to a restricted room would be briefly unable to join. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 98c203ada0..4caf6cbdee 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -120,7 +120,7 @@ class EventBuilder: The signed and hashed event. """ if auth_event_ids is None: - state_ids = await self._state.get_current_state_ids( + state_ids = await self._state.compute_state_after_events( self.room_id, prev_event_ids ) auth_event_ids = self._event_auth_handler.compute_auth_events( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 90e0b21600..a5b9ac904e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -755,14 +755,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): latest_event_ids = await self.store.get_prev_events_for_room(room_id) - current_state_ids = await self.state_handler.get_current_state_ids( - room_id, latest_event_ids=latest_event_ids + state_before_join = await self.state_handler.compute_state_after_events( + room_id, latest_event_ids ) # TODO: Refactor into dictionary of explicitly allowed transitions # between old and new state, with specific error messages for some # transitions and generic otherwise - old_state_id = current_state_ids.get((EventTypes.Member, target.to_string())) + old_state_id = state_before_join.get((EventTypes.Member, target.to_string())) if old_state_id: old_state = await self.store.get_event(old_state_id, allow_none=True) old_membership = old_state.content.get("membership") if old_state else None @@ -813,11 +813,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if action == "kick": raise AuthError(403, "The target user is not in the room") - is_host_in_room = await self._is_host_in_room(current_state_ids) + is_host_in_room = await self._is_host_in_room(state_before_join) if effective_membership_state == Membership.JOIN: if requester.is_guest: - guest_can_join = await self._can_guest_join(current_state_ids) + guest_can_join = await self._can_guest_join(state_before_join) if not guest_can_join: # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. @@ -855,7 +855,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # Check if a remote join should be performed. 
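The essence of this change, restated as a minimal sketch (illustrative only; `store` and `state_handler` stand in for the handler attributes used in the hunks above and below):

```python
from typing import Collection, Dict, Tuple

StateMap = Dict[Tuple[str, str], str]

async def state_for_new_join(store, state_handler, room_id: str) -> StateMap:
    # The forward extremities will become the prev_events of the join event...
    latest_event_ids: Collection[str] = await store.get_prev_events_for_room(room_id)
    # ...so membership/auth decisions must use the state resolved *after*
    # those events, not a "current state" snapshot that can race with
    # other events arriving concurrently.
    return await state_handler.compute_state_after_events(room_id, latest_event_ids)
```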
remote_join, remote_room_hosts = await self._should_perform_remote_join( - target.to_string(), room_id, remote_room_hosts, content, is_host_in_room + target.to_string(), + room_id, + remote_room_hosts, + content, + is_host_in_room, + state_before_join, ) if remote_join: if ratelimit: @@ -995,6 +1000,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): remote_room_hosts: List[str], content: JsonDict, is_host_in_room: bool, + state_before_join: StateMap[str], ) -> Tuple[bool, List[str]]: """ Check whether the server should do a remote join (as opposed to a local @@ -1014,6 +1020,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): content: The content to use as the event body of the join. This may be modified. is_host_in_room: True if the host is in the room. + state_before_join: The state before the join event (i.e. the resolution of + the states after its parent events). Returns: A tuple of: @@ -1030,20 +1038,17 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # If the host is in the room, but not one of the authorised hosts # for restricted join rules, a remote join must be used. room_version = await self.store.get_room_version(room_id) - current_state_ids = await self._storage_controllers.state.get_current_state_ids( - room_id - ) # If restricted join rules are not being used, a local join can always # be used. if not await self.event_auth_handler.has_restricted_join_rules( - current_state_ids, room_version + state_before_join, room_version ): return False, [] # If the user is invited to the room or already joined, the join # event can always be issued locally. - prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None) + prev_member_event_id = state_before_join.get((EventTypes.Member, user_id), None) prev_member_event = None if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) @@ -1058,10 +1063,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # # If not, generate a new list of remote hosts based on which # can issue invites. - event_map = await self.store.get_events(current_state_ids.values()) + event_map = await self.store.get_events(state_before_join.values()) current_state = { state_key: event_map[event_id] - for state_key, event_id in current_state_ids.items() + for state_key, event_id in state_before_join.items() } allowed_servers = get_servers_from_users( get_users_which_can_issue_invite(current_state) @@ -1075,7 +1080,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # Ensure the member should be allowed access via membership in a room. 
await self.event_auth_handler.check_restricted_join_rules( - current_state_ids, room_version, user_id, prev_member_event + state_before_join, room_version, user_id, prev_member_event ) # If this is going to be a local join, additional information must @@ -1085,7 +1090,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): EventContentFields.AUTHORISING_USER ] = await self.event_auth_handler.get_user_which_could_invite( room_id, - current_state_ids, + state_before_join, ) return False, [] diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 3a65bd0849..56606e9afb 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -153,22 +153,27 @@ class StateHandler: ReplicationUpdateCurrentStateRestServlet.make_client(hs) ) - async def get_current_state_ids( + async def compute_state_after_events( self, room_id: str, - latest_event_ids: Collection[str], + event_ids: Collection[str], ) -> StateMap[str]: - """Get the current state, or the state at a set of events, for a room + """Fetch the state after each of the given event IDs. Resolve them and return. + + This is typically used where `event_ids` is a collection of forward extremities + in a room, intended to become the `prev_events` of a new event E. If so, the + return value of this function represents the state before E. Args: - room_id: - latest_event_ids: The forward extremities to resolve. + room_id: the room_id containing the given events. + event_ids: the events whose state should be fetched and resolved. Returns: - the state dict, mapping from (event_type, state_key) -> event_id + the state dict (a mapping from (event_type, state_key) -> event_id) which + holds the resolution of the states after the given event IDs. """ - logger.debug("calling resolve_state_groups from get_current_state_ids") - ret = await self.resolve_state_groups_for_events(room_id, latest_event_ids) + logger.debug("calling resolve_state_groups from compute_state_after_events") + ret = await self.resolve_state_groups_for_events(room_id, event_ids) return await ret.get_state(self._state_storage_controller, StateFilter.all()) async def get_current_users_in_room( -- cgit 1.4.1 From 3343035a06c6e4c5507d3acbd335bb8d4743d9b2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 15 Jul 2022 08:22:43 -0400 Subject: Use a real room in the notification rotation tests. (#13260) Instead of manually inserting fake data. This fixes some issues with having to manually calculate stream orderings and other oddities. --- changelog.d/13260.misc | 1 + tests/storage/test_event_push_actions.py | 195 +++++++++++++------------------ 2 files changed, 80 insertions(+), 116 deletions(-) create mode 100644 changelog.d/13260.misc diff --git a/changelog.d/13260.misc b/changelog.d/13260.misc new file mode 100644 index 0000000000..b55ff32c76 --- /dev/null +++ b/changelog.d/13260.misc @@ -0,0 +1 @@ +Clean-up tests for notifications. diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index e8c53f16d9..ba40124c8a 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import Mock - from twisted.test.proto_helpers import MemoryReactor +from synapse.rest import admin +from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.storage.databases.main.event_push_actions import NotifCounts from synapse.util import Clock @@ -24,15 +24,14 @@ from tests.unittest import HomeserverTestCase USER_ID = "@user:example.com" -PlAIN_NOTIF = ["notify", {"set_tweak": "highlight", "value": False}] -HIGHLIGHT = [ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight"}, -] - class EventPushActionsStoreTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main persist_events_store = hs.get_datastores().persist_events @@ -54,154 +53,118 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) def test_count_aggregation(self) -> None: - room_id = "!foo:example.com" - user_id = "@user1235:test" + # Create a user to receive notifications and send receipts. + user_id = self.register_user("user1235", "pass") + token = self.login("user1235", "pass") + + # And another users to send events. + other_id = self.register_user("other", "pass") + other_token = self.login("other", "pass") + + # Create a room and put both users in it. + room_id = self.helper.create_room_as(user_id, tok=token) + self.helper.join(room_id, other_id, tok=other_token) - last_read_stream_ordering = [0] + last_event_id: str - def _assert_counts(noitf_count: int, highlight_count: int) -> None: + def _assert_counts( + noitf_count: int, unread_count: int, highlight_count: int + ) -> None: counts = self.get_success( self.store.db_pool.runInteraction( - "", - self.store._get_unread_counts_by_pos_txn, + "get-unread-counts", + self.store._get_unread_counts_by_receipt_txn, room_id, user_id, - last_read_stream_ordering[0], ) ) self.assertEqual( counts, NotifCounts( notify_count=noitf_count, - unread_count=0, # Unread counts are tested in the sync tests. 
+ unread_count=unread_count, highlight_count=highlight_count, ), ) - def _inject_actions(stream: int, action: list) -> None: - event = Mock() - event.room_id = room_id - event.event_id = f"$test{stream}:example.com" - event.internal_metadata.stream_ordering = stream - event.internal_metadata.is_outlier.return_value = False - event.depth = stream - - self.store._events_stream_cache.entity_has_changed(room_id, stream) - - self.get_success( - self.store.db_pool.simple_insert( - table="events", - values={ - "stream_ordering": stream, - "topological_ordering": stream, - "type": "m.room.message", - "room_id": room_id, - "processed": True, - "outlier": False, - "event_id": event.event_id, - }, - ) + def _create_event(highlight: bool = False) -> str: + result = self.helper.send_event( + room_id, + type="m.room.message", + content={"msgtype": "m.text", "body": user_id if highlight else "msg"}, + tok=other_token, ) + nonlocal last_event_id + last_event_id = result["event_id"] + return last_event_id - self.get_success( - self.store.add_push_actions_to_staging( - event.event_id, - {user_id: action}, - False, - ) - ) - self.get_success( - self.store.db_pool.runInteraction( - "", - self.persist_events_store._set_push_actions_for_event_and_users_txn, - [(event, None)], - [(event, None)], - ) - ) - - def _rotate(stream: int) -> None: - self.get_success( - self.store.db_pool.runInteraction( - "rotate-receipts", self.store._handle_new_receipts_for_notifs_txn - ) - ) - - self.get_success( - self.store.db_pool.runInteraction( - "rotate-notifs", self.store._rotate_notifs_before_txn, stream - ) - ) - - def _mark_read(stream: int, depth: int) -> None: - last_read_stream_ordering[0] = stream + def _rotate() -> None: + self.get_success(self.store._rotate_notifs()) + def _mark_read(event_id: str) -> None: self.get_success( self.store.insert_receipt( room_id, "m.read", user_id=user_id, - event_ids=[f"$test{stream}:example.com"], + event_ids=[event_id], data={}, ) ) - _assert_counts(0, 0) - _inject_actions(1, PlAIN_NOTIF) - _assert_counts(1, 0) - _rotate(1) - _assert_counts(1, 0) + _assert_counts(0, 0, 0) + _create_event() + _assert_counts(1, 1, 0) + _rotate() + _assert_counts(1, 1, 0) - _inject_actions(3, PlAIN_NOTIF) - _assert_counts(2, 0) - _rotate(3) - _assert_counts(2, 0) + event_id = _create_event() + _assert_counts(2, 2, 0) + _rotate() + _assert_counts(2, 2, 0) - _inject_actions(5, PlAIN_NOTIF) - _mark_read(3, 3) - _assert_counts(1, 0) + _create_event() + _mark_read(event_id) + _assert_counts(1, 1, 0) - _mark_read(5, 5) - _assert_counts(0, 0) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) - _inject_actions(6, PlAIN_NOTIF) - _rotate(6) - _assert_counts(1, 0) - - self.get_success( - self.store.db_pool.simple_delete( - table="event_push_actions", keyvalues={"1": 1}, desc="" - ) - ) + _create_event() + _rotate() + _assert_counts(1, 1, 0) - _assert_counts(1, 0) + # Delete old event push actions, this should not affect the (summarised) count. + self.get_success(self.store._remove_old_push_actions_that_have_rotated()) + _assert_counts(1, 1, 0) - _mark_read(6, 6) - _assert_counts(0, 0) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) - _inject_actions(8, HIGHLIGHT) - _assert_counts(1, 1) - _rotate(8) - _assert_counts(1, 1) + event_id = _create_event(True) + _assert_counts(1, 1, 1) + _rotate() + _assert_counts(1, 1, 1) # Check that adding another notification and rotating after highlight # works. 
- _inject_actions(10, PlAIN_NOTIF) - _rotate(10) - _assert_counts(2, 1) + _create_event() + _rotate() + _assert_counts(2, 2, 1) # Check that sending read receipts at different points results in the # right counts. - _mark_read(8, 8) - _assert_counts(1, 0) - _mark_read(10, 10) - _assert_counts(0, 0) - - _inject_actions(11, HIGHLIGHT) - _assert_counts(1, 1) - _mark_read(11, 11) - _assert_counts(0, 0) - _rotate(11) - _assert_counts(0, 0) + _mark_read(event_id) + _assert_counts(1, 1, 0) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) + + _create_event(True) + _assert_counts(1, 1, 1) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) + _rotate() + _assert_counts(0, 0, 0) def test_find_first_stream_ordering_after_ts(self) -> None: def add_event(so: int, ts: int) -> None: -- cgit 1.4.1 From 0731e0829c08aec7f31fdc72c236757e4cc38747 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Jul 2022 13:59:45 +0100 Subject: Don't pull out the full state when storing state (#13274) --- changelog.d/13274.misc | 1 + synapse/state/__init__.py | 36 ++++--- synapse/storage/controllers/state.py | 2 +- synapse/storage/databases/state/store.py | 156 ++++++++++++++++++++----------- tests/rest/client/test_rooms.py | 4 +- tests/test_state.py | 4 + 6 files changed, 132 insertions(+), 71 deletions(-) create mode 100644 changelog.d/13274.misc diff --git a/changelog.d/13274.misc b/changelog.d/13274.misc new file mode 100644 index 0000000000..a334414320 --- /dev/null +++ b/changelog.d/13274.misc @@ -0,0 +1 @@ +Don't pull out state in `compute_event_context` for unconflicted state. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 56606e9afb..fcb7e829d4 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -298,12 +298,18 @@ class StateHandler: state_group_before_event_prev_group = entry.prev_group deltas_to_state_group_before_event = entry.delta_ids + state_ids_before_event = None # We make sure that we have a state group assigned to the state. if entry.state_group is None: - state_ids_before_event = await entry.get_state( - self._state_storage_controller, StateFilter.all() - ) + # store_state_group requires us to have either a previous state group + # (with deltas) or the complete state map. So, if we don't have a + # previous state group, load the complete state map now. 
+ if state_group_before_event_prev_group is None: + state_ids_before_event = await entry.get_state( + self._state_storage_controller, StateFilter.all() + ) + state_group_before_event = ( await self._state_storage_controller.store_state_group( event.event_id, @@ -316,7 +322,6 @@ class StateHandler: entry.state_group = state_group_before_event else: state_group_before_event = entry.state_group - state_ids_before_event = None # # now if it's not a state event, we're done @@ -336,19 +341,20 @@ class StateHandler: # # otherwise, we'll need to create a new state group for after the event # - if state_ids_before_event is None: - state_ids_before_event = await entry.get_state( - self._state_storage_controller, StateFilter.all() - ) key = (event.type, event.state_key) - if key in state_ids_before_event: - replaces = state_ids_before_event[key] - if replaces != event.event_id: - event.unsigned["replaces_state"] = replaces - state_ids_after_event = dict(state_ids_before_event) - state_ids_after_event[key] = event.event_id + if state_ids_before_event is not None: + replaces = state_ids_before_event.get(key) + else: + replaces_state_map = await entry.get_state( + self._state_storage_controller, StateFilter.from_types([key]) + ) + replaces = replaces_state_map.get(key) + + if replaces and replaces != event.event_id: + event.unsigned["replaces_state"] = replaces + delta_ids = {key: event.event_id} state_group_after_event = ( @@ -357,7 +363,7 @@ class StateHandler: event.room_id, prev_group=state_group_before_event, delta_ids=delta_ids, - current_state_ids=state_ids_after_event, + current_state_ids=None, ) ) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index d3a44bc876..e08f956e6e 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -346,7 +346,7 @@ class StateStorageController: room_id: str, prev_group: Optional[int], delta_ids: Optional[StateMap[str]], - current_state_ids: StateMap[str], + current_state_ids: Optional[StateMap[str]], ) -> int: """Store a new set of state, returning a newly assigned state group. diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 609a2b88bf..afbc85ad0c 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -400,14 +400,17 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): room_id: str, prev_group: Optional[int], delta_ids: Optional[StateMap[str]], - current_state_ids: StateMap[str], + current_state_ids: Optional[StateMap[str]], ) -> int: """Store a new set of state, returning a newly assigned state group. + At least one of `current_state_ids` and `prev_group` must be provided. Whenever + `prev_group` is not None, `delta_ids` must also not be None. + Args: event_id: The event ID for which the state was calculated room_id - prev_group: A previous state group for the room, optional. + prev_group: A previous state group for the room. delta_ids: The delta between state at `prev_group` and `current_state_ids`, if `prev_group` was given. Same format as `current_state_ids`. 
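Under the contract documented above there are exactly two valid call shapes, roughly as follows (a sketch with placeholder state keys, not code from the patch):

```python
async def demo(store, event_id: str, room_id: str) -> int:
    # 1. No previous group: the caller must supply the full state map.
    base = await store.store_state_group(
        event_id, room_id, prev_group=None, delta_ids=None,
        current_state_ids={("m.room.create", ""): event_id},
    )
    # 2. A previous group plus a delta: the caller no longer needs to
    #    materialise the full state map at all.
    return await store.store_state_group(
        event_id, room_id, prev_group=base,
        delta_ids={("m.room.member", "@u:test"): event_id},
        current_state_ids=None,
    )
```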
@@ -418,10 +421,41 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): The state group ID """ - def _store_state_group_txn(txn: LoggingTransaction) -> int: - if current_state_ids is None: - # AFAIK, this can never happen - raise Exception("current_state_ids cannot be None") + if prev_group is None and current_state_ids is None: + raise Exception("current_state_ids and prev_group can't both be None") + + if prev_group is not None and delta_ids is None: + raise Exception("delta_ids is None when prev_group is not None") + + def insert_delta_group_txn( + txn: LoggingTransaction, prev_group: int, delta_ids: StateMap[str] + ) -> Optional[int]: + """Try and persist the new group as a delta. + + Requires that we have the state as a delta from a previous state group. + + Returns: + The state group if successfully created, or None if the state + needs to be persisted as a full state. + """ + is_in_db = self.db_pool.simple_select_one_onecol_txn( + txn, + table="state_groups", + keyvalues={"id": prev_group}, + retcol="id", + allow_none=True, + ) + if not is_in_db: + raise Exception( + "Trying to persist state with unpersisted prev_group: %r" + % (prev_group,) + ) + + # if the chain of state group deltas is going too long, we fall back to + # persisting a complete state group. + potential_hops = self._count_state_group_hops_txn(txn, prev_group) + if potential_hops >= MAX_STATE_DELTA_HOPS: + return None state_group = self._state_group_seq_gen.get_next_id_txn(txn) @@ -431,51 +465,45 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): values={"id": state_group, "room_id": room_id, "event_id": event_id}, ) - # We persist as a delta if we can, while also ensuring the chain - # of deltas isn't tooo long, as otherwise read performance degrades. 
- if prev_group: - is_in_db = self.db_pool.simple_select_one_onecol_txn( - txn, - table="state_groups", - keyvalues={"id": prev_group}, - retcol="id", - allow_none=True, - ) - if not is_in_db: - raise Exception( - "Trying to persist state with unpersisted prev_group: %r" - % (prev_group,) - ) - - potential_hops = self._count_state_group_hops_txn(txn, prev_group) - if prev_group and potential_hops < MAX_STATE_DELTA_HOPS: - assert delta_ids is not None - - self.db_pool.simple_insert_txn( - txn, - table="state_group_edges", - values={"state_group": state_group, "prev_state_group": prev_group}, - ) + self.db_pool.simple_insert_txn( + txn, + table="state_group_edges", + values={"state_group": state_group, "prev_state_group": prev_group}, + ) - self.db_pool.simple_insert_many_txn( - txn, - table="state_groups_state", - keys=("state_group", "room_id", "type", "state_key", "event_id"), - values=[ - (state_group, room_id, key[0], key[1], state_id) - for key, state_id in delta_ids.items() - ], - ) - else: - self.db_pool.simple_insert_many_txn( - txn, - table="state_groups_state", - keys=("state_group", "room_id", "type", "state_key", "event_id"), - values=[ - (state_group, room_id, key[0], key[1], state_id) - for key, state_id in current_state_ids.items() - ], - ) + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), + values=[ + (state_group, room_id, key[0], key[1], state_id) + for key, state_id in delta_ids.items() + ], + ) + + return state_group + + def insert_full_state_txn( + txn: LoggingTransaction, current_state_ids: StateMap[str] + ) -> int: + """Persist the full state, returning the new state group.""" + state_group = self._state_group_seq_gen.get_next_id_txn(txn) + + self.db_pool.simple_insert_txn( + txn, + table="state_groups", + values={"id": state_group, "room_id": room_id, "event_id": event_id}, + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), + values=[ + (state_group, room_id, key[0], key[1], state_id) + for key, state_id in current_state_ids.items() + ], + ) # Prefill the state group caches with this group. # It's fine to use the sequence like this as the state group map @@ -491,7 +519,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, - value=dict(current_member_state_ids), + value=current_member_state_ids, ) current_non_member_state_ids = { @@ -503,13 +531,35 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, - value=dict(current_non_member_state_ids), + value=current_non_member_state_ids, ) return state_group + if prev_group is not None: + state_group = await self.db_pool.runInteraction( + "store_state_group.insert_delta_group", + insert_delta_group_txn, + prev_group, + delta_ids, + ) + if state_group is not None: + return state_group + + # We're going to persist the state as a complete group rather than + # a delta, so first we need to ensure we have loaded the state map + # from the database. 
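Putting the pieces of this hunk together, the new persist path runs, in outline (a paraphrase of the code above and below, not the literal implementation):

```python
async def store_state_group_flow(
    self, event_id, room_id, prev_group, delta_ids, current_state_ids
):
    if prev_group is not None:
        # First try to persist as a delta; insert_delta_group_txn returns
        # None if the delta chain has already reached MAX_STATE_DELTA_HOPS.
        group = await self.db_pool.runInteraction(
            "store_state_group.insert_delta_group",
            insert_delta_group_txn, prev_group, delta_ids,
        )
        if group is not None:
            return group

    # Fall back to a full snapshot; only *now* load the full map from the
    # DB if the caller gave us just a delta.
    if current_state_ids is None:
        groups = await self._get_state_for_groups([prev_group])
        current_state_ids = dict(groups[prev_group])
        current_state_ids.update(delta_ids)

    return await self.db_pool.runInteraction(
        "store_state_group.insert_full_state",
        insert_full_state_txn, current_state_ids,
    )
```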
+ if current_state_ids is None: + assert prev_group is not None + assert delta_ids is not None + groups = await self._get_state_for_groups([prev_group]) + current_state_ids = dict(groups[prev_group]) + current_state_ids.update(delta_ids) + return await self.db_pool.runInteraction( - "store_state_group", _store_state_group_txn + "store_state_group.insert_full_state", + insert_full_state_txn, + current_state_ids, ) async def purge_unreferenced_state_groups( diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 8ed5272b16..06221b806a 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -709,7 +709,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(32, channel.resource_usage.db_txn_count) + self.assertEqual(36, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -722,7 +722,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(35, channel.resource_usage.db_txn_count) + self.assertEqual(40, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id diff --git a/tests/test_state.py b/tests/test_state.py index e2c0013671..bafd6d1750 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -99,6 +99,10 @@ class _DummyStore: state_group = self._next_group self._next_group += 1 + if current_state_ids is None: + current_state_ids = dict(self._group_to_state[prev_group]) + current_state_ids.update(delta_ids) + self._group_to_state[state_group] = dict(current_state_ids) return state_group -- cgit 1.4.1 From e9ce4d089bbb013f870bbc8d58ec796e8f315eb4 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 15 Jul 2022 16:18:47 +0100 Subject: Use and recommend poetry 1.1.14, up from 1.1.12 (#13285) --- .ci/scripts/test_old_deps.sh | 2 +- .github/workflows/twisted_trunk.yml | 4 ++-- changelog.d/13285.misc | 1 + docker/Dockerfile | 2 +- docs/development/dependencies.md | 25 +++++++++++++++++++++++++ 5 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13285.misc diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh index 7d0625fa86..478c8d639a 100755 --- a/.ci/scripts/test_old_deps.sh +++ b/.ci/scripts/test_old_deps.sh @@ -69,7 +69,7 @@ with open('pyproject.toml', 'w') as f: " python3 -c "$REMOVE_DEV_DEPENDENCIES" -pipx install poetry==1.1.12 +pipx install poetry==1.1.14 ~/.local/bin/poetry lock echo "::group::Patched pyproject.toml" diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index f35e82297f..dd8e6fbb1c 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -127,12 +127,12 @@ jobs: run: | set -x DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx - pipx install poetry==1.1.12 + pipx install poetry==1.1.14 poetry remove -n twisted poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk poetry lock --no-update - # NOT IN 1.1.12 poetry lock --check + # NOT IN 1.1.14 poetry lock --check working-directory: synapse - run: | diff --git a/changelog.d/13285.misc b/changelog.d/13285.misc new file mode 100644 index 
0000000000..b7bcbadb5b --- /dev/null +++ b/changelog.d/13285.misc @@ -0,0 +1 @@ +Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. diff --git a/docker/Dockerfile b/docker/Dockerfile index 22707ed142..f4d8e6c925 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -45,7 +45,7 @@ RUN \ # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. -# We use a specific commit from poetry's master branch instead of our usual 1.1.12, +# We use a specific commit from poetry's master branch instead of our usual 1.1.14, # to incorporate fixes to some bugs in `poetry export`. This commit corresponds to # https://github.com/python-poetry/poetry/pull/5156 and # https://github.com/python-poetry/poetry/issues/5141 ; diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index 8ef7d357d8..236856a6b0 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -237,3 +237,28 @@ poetry run pip install build && poetry run python -m build because [`build`](https://github.com/pypa/build) is a standardish tool which doesn't require poetry. (It's what we use in CI too). However, you could try `poetry build` too. + + +# Troubleshooting + +## Check the version of poetry with `poetry --version`. + +At the time of writing, the 1.2 series is beta only. We have seen some examples +where the lockfiles generated by 1.2 prereleases aren't interpreted correctly +by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical +[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain +[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775). + +It can also be useful to check the version of `poetry-core` in use. If you've +installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`. + +## Clear caches: `poetry cache clear --all pypi`. + +Poetry caches a bunch of information about packages that isn't readily available +from PyPI. (This is what makes poetry seem slow when doing the first +`poetry install`.) Try `poetry cache list` and `poetry cache clear --all +<name of cache>` to see if that fixes things. + +## Try `--verbose` or `--dry-run` arguments. + +Sometimes useful to see what poetry's internal logic is. -- cgit 1.4.1 From 7b67e93d499cb45f7217e9dfea046ed8b5c455fd Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 15 Jul 2022 11:42:21 -0500 Subject: Provide more info why we don't have any thumbnails to serve (#13038) Fix https://github.com/matrix-org/synapse/issues/13016 ## New error code and status ### Before Previously, we returned a `404` for `/thumbnail` which isn't even in the spec. ```json { "errcode": "M_NOT_FOUND", "error": "Not found [b'hs1', b'tefQeZhmVxoiBfuFQUKRzJxc']" } ``` ### After What does the spec say? > 400: The request does not make sense to the server, or the server cannot thumbnail the content. For example, the client requested non-integer dimensions or asked for negatively-sized images. > > *-- https://spec.matrix.org/v1.1/client-server-api/#get_matrixmediav3thumbnailservernamemediaid* Now with this PR, we respond with a `400` when we don't have thumbnails to serve and we explain why we might not have any thumbnails. ```json { "errcode": "M_UNKNOWN", "error": "Cannot find any thumbnails for the requested media ([b'example.com', b'12345']). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason.
(Dynamic thumbnails are disabled on this server.)", } ``` > Cannot find any thumbnails for the requested media ([b'example.com', b'12345']). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.) --- We still respond with a 404 in many other places. But we can iterate on those later and maybe keep some in some specific places after spec updates/clarification: https://github.com/matrix-org/matrix-spec/issues/1122 We can also iterate on the bugs where Synapse doesn't thumbnail when it should in other issues/PRs. --- changelog.d/13038.feature | 1 + synapse/config/repository.py | 35 ++++++++++++--- synapse/rest/media/v1/thumbnail_resource.py | 40 ++++++++++++++++- tests/rest/media/v1/test_media_storage.py | 70 +++++++++++++++++++++++++---- 4 files changed, 129 insertions(+), 17 deletions(-) create mode 100644 changelog.d/13038.feature diff --git a/changelog.d/13038.feature b/changelog.d/13038.feature new file mode 100644 index 0000000000..1278f1b4e9 --- /dev/null +++ b/changelog.d/13038.feature @@ -0,0 +1 @@ +Provide more info why we don't have any thumbnails to serve. diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 3c69dd325f..1033496bb4 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -42,6 +42,18 @@ THUMBNAIL_SIZE_YAML = """\ # method: %(method)s """ +# A map from the given media type to the type of thumbnail we should generate +# for it. +THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP = { + "image/jpeg": "jpeg", + "image/jpg": "jpeg", + "image/webp": "jpeg", + # Thumbnails can only be jpeg or png. We choose png thumbnails for gif + # because it can have transparency. + "image/gif": "png", + "image/png": "png", +} + HTTP_PROXY_SET_WARNING = """\ The Synapse config url_preview_ip_range_blacklist will be ignored as an HTTP(s) proxy is configured.""" @@ -79,13 +91,22 @@ def parse_thumbnail_requirements( width = size["width"] height = size["height"] method = size["method"] - jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg") - png_thumbnail = ThumbnailRequirement(width, height, method, "image/png") - requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail) - requirements.setdefault("image/jpg", []).append(jpeg_thumbnail) - requirements.setdefault("image/webp", []).append(jpeg_thumbnail) - requirements.setdefault("image/gif", []).append(png_thumbnail) - requirements.setdefault("image/png", []).append(png_thumbnail) + + for format, thumbnail_format in THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.items(): + requirement = requirements.setdefault(format, []) + if thumbnail_format == "jpeg": + requirement.append( + ThumbnailRequirement(width, height, method, "image/jpeg") + ) + elif thumbnail_format == "png": + requirement.append( + ThumbnailRequirement(width, height, method, "image/png") + ) + else: + raise Exception( + "Unknown thumbnail mapping from %s to %s. This is a Synapse problem, please report!" 
+ % (format, thumbnail_format) + ) return { media_type: tuple(thumbnails) for media_type, thumbnails in requirements.items() } diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 2295adfaa7..5f725c7600 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -17,9 +17,11 @@ import logging from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP from synapse.http.server import ( DirectServeJsonResource, + respond_with_json, set_corp_headers, set_cors_headers, ) @@ -309,6 +311,19 @@ class ThumbnailResource(DirectServeJsonResource): url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. """ + logger.debug( + "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s", + media_id, + desired_width, + desired_height, + desired_method, + thumbnail_infos, + ) + + # If `dynamic_thumbnails` is enabled, we expect Synapse to go down a + # different code path to handle it. + assert not self.dynamic_thumbnails + if thumbnail_infos: file_info = self._select_thumbnail( desired_width, @@ -384,8 +399,29 @@ class ThumbnailResource(DirectServeJsonResource): file_info.thumbnail.length, ) else: + # This might be because: + # 1. We can't create thumbnails for the given media (corrupted or + # unsupported file type), or + # 2. The thumbnailing process never ran or errored out initially + # when the media was first uploaded (these bugs should be + # reported and fixed). + # Note that we don't attempt to generate a thumbnail now because + # `dynamic_thumbnails` is disabled. logger.info("Failed to find any generated thumbnails") - respond_404(request) + + respond_with_json( + request, + 400, + cs_error( + "Cannot find any thumbnails for the requested media (%r). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)" + % ( + request.postpath, + ", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()), + ), + code=Codes.UNKNOWN, + ), + send_cors=True, + ) def _select_thumbnail( self, diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 79727c430f..d18fc13c21 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -126,7 +126,9 @@ class _TestImage: expected_scaled: The expected bytes from scaled thumbnailing, or None if test should just check for a valid image returned. expected_found: True if the file should exist on the server, or False if - a 404 is expected. + a 404/400 is expected. + unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or + False if the thumbnailing should succeed or a normal 404 is expected. 
""" data: bytes @@ -135,6 +137,7 @@ class _TestImage: expected_cropped: Optional[bytes] = None expected_scaled: Optional[bytes] = None expected_found: bool = True + unable_to_thumbnail: bool = False @parameterized_class( @@ -192,6 +195,7 @@ class _TestImage: b"image/gif", b".gif", expected_found=False, + unable_to_thumbnail=True, ), ), ], @@ -366,18 +370,29 @@ class MediaRepoTests(unittest.HomeserverTestCase): def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" self._test_thumbnail( - "crop", self.test_image.expected_cropped, self.test_image.expected_found + "crop", + self.test_image.expected_cropped, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) def test_thumbnail_scale(self) -> None: """Test that a scaled remote thumbnail is available.""" self._test_thumbnail( - "scale", self.test_image.expected_scaled, self.test_image.expected_found + "scale", + self.test_image.expected_scaled, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) def test_invalid_type(self) -> None: """An invalid thumbnail type is never available.""" - self._test_thumbnail("invalid", None, False) + self._test_thumbnail( + "invalid", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} @@ -386,7 +401,12 @@ class MediaRepoTests(unittest.HomeserverTestCase): """ Override the config to generate only scaled thumbnails, but request a cropped one. """ - self._test_thumbnail("crop", None, False) + self._test_thumbnail( + "crop", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} @@ -395,14 +415,22 @@ class MediaRepoTests(unittest.HomeserverTestCase): """ Override the config to generate only cropped thumbnails, but request a scaled one. """ - self._test_thumbnail("scale", None, False) + self._test_thumbnail( + "scale", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) def test_thumbnail_repeated_thumbnail(self) -> None: """Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. """ self._test_thumbnail( - "scale", self.test_image.expected_scaled, self.test_image.expected_found + "scale", + self.test_image.expected_scaled, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) if not self.test_image.expected_found: @@ -459,8 +487,24 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) def _test_thumbnail( - self, method: str, expected_body: Optional[bytes], expected_found: bool + self, + method: str, + expected_body: Optional[bytes], + expected_found: bool, + unable_to_thumbnail: bool = False, ) -> None: + """Test the given thumbnailing method works as expected. + + Args: + method: The thumbnailing method to use (crop, scale). + expected_body: The expected bytes from thumbnailing, or None if + test should just check for a valid image. + expected_found: True if the file should exist on the server, or False if + a 404/400 is expected. + unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or + False if the thumbnailing should succeed or a normal 404 is expected. 
+ """ + params = "?width=32&height=32&method=" + method channel = make_request( self.reactor, @@ -496,6 +540,16 @@ class MediaRepoTests(unittest.HomeserverTestCase): else: # ensure that the result is at least some valid image Image.open(BytesIO(channel.result["body"])) + elif unable_to_thumbnail: + # A 400 with a JSON body. + self.assertEqual(channel.code, 400) + self.assertEqual( + channel.json_body, + { + "errcode": "M_UNKNOWN", + "error": "Cannot find any thumbnails for the requested media ([b'example.com', b'12345']). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", + }, + ) else: # A 404 with a JSON body. self.assertEqual(channel.code, 404) -- cgit 1.4.1 From 96cf81e312407f0caba1b45ba9899906b1dcc098 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 15 Jul 2022 21:31:27 +0200 Subject: Use HTTPStatus constants in place of literals in tests. (#13297) --- changelog.d/13297.misc | 1 + tests/federation/test_complexity.py | 5 +- tests/federation/test_federation_server.py | 11 +- tests/federation/transport/test_knocking.py | 5 +- tests/handlers/test_password_providers.py | 41 ++-- tests/rest/admin/test_user.py | 16 +- tests/rest/client/test_account.py | 71 +++--- tests/rest/client/test_login.py | 55 ++--- tests/rest/client/test_rooms.py | 341 ++++++++++++++++------------ 9 files changed, 308 insertions(+), 238 deletions(-) create mode 100644 changelog.d/13297.misc diff --git a/changelog.d/13297.misc b/changelog.d/13297.misc new file mode 100644 index 0000000000..545a62369f --- /dev/null +++ b/changelog.d/13297.misc @@ -0,0 +1 @@ +Use `HTTPStatus` constants in place of literals in tests. \ No newline at end of file diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 9f1115dd23..c6dd99316a 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError @@ -50,7 +51,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) complexity = channel.json_body["v1"] self.assertTrue(complexity > 0, complexity) @@ -62,7 +63,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index fd15e680ed..8ea13ceb93 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from http import HTTPStatus from parameterized import parameterized @@ -58,7 +59,7 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): "/_matrix/federation/v1/get_missing_events/%s" % (room_1,), query_content, ) - self.assertEqual(400, channel.code, channel.result) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON") @@ -119,7 +120,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/state/%s?event_id=xyz" % (room_1,) ) - self.assertEqual(403, channel.code, channel.result) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -153,7 +154,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): f"/_matrix/federation/v1/make_join/{self._room_id}/{user_id}" f"?ver={DEFAULT_ROOM_VERSION}", ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body def test_send_join(self): @@ -171,7 +172,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): f"/_matrix/federation/v2/send_join/{self._room_id}/x", content=join_event_dict, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # we should get complete room state back returned_state = [ @@ -226,7 +227,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", content=join_event_dict, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # expect a reduced room state returned_state = [ diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index d21c11b716..0d048207b7 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from collections import OrderedDict +from http import HTTPStatus from typing import Dict, List from synapse.api.constants import EventTypes, JoinRules, Membership @@ -255,7 +256,7 @@ class FederationKnockingTestCase( RoomVersions.V7.identifier, ), ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # Note: We don't expect the knock membership event to be sent over federation as # part of the stripped room state, as the knocking homeserver already has that @@ -293,7 +294,7 @@ class FederationKnockingTestCase( % (room_id, signed_knock_event.event_id), signed_knock_event_json, ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # Check that we got the stripped room state in return room_state_events = channel.json_body["knock_state_events"] diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 82b3bb3b73..4c62449c89 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -14,6 +14,7 @@ """Tests for the password_auth_provider interface""" +from http import HTTPStatus from typing import Any, Type, Union from unittest.mock import Mock @@ -188,14 +189,14 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # check_password must return an awaitable mock_password_provider.check_password.return_value = make_awaitable(True) channel = self._send_password_login("u", "p") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@u:test", channel.json_body["user_id"]) mock_password_provider.check_password.assert_called_once_with("@u:test", "p") mock_password_provider.reset_mock() # login with mxid should work too channel = self._send_password_login("@u:bz", "p") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@u:bz", channel.json_body["user_id"]) mock_password_provider.check_password.assert_called_once_with("@u:bz", "p") mock_password_provider.reset_mock() @@ -204,7 +205,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # in these cases, but at least we can guard against the API changing # unexpectedly channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"]) mock_password_provider.check_password.assert_called_once_with( "@ USER🙂NAME :test", " pASS😢word " @@ -258,10 +259,10 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # check_password must return an awaitable mock_password_provider.check_password.return_value = make_awaitable(False) channel = self._send_password_login("u", "p") - self.assertEqual(channel.code, 403, channel.result) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@localuser:test", channel.json_body["user_id"]) @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) @@ -382,7 +383,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # login shouldn't work and should be rejected with a 400 ("unknown 
login type") channel = self._send_password_login("u", "p") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_password.assert_not_called() @override_config(legacy_providers_config(LegacyCustomAuthProvider)) @@ -406,14 +407,14 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # login with missing param should be rejected channel = self._send_login("test.login_type", "u") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_auth.return_value = make_awaitable( ("@user:bz", None) ) channel = self._send_login("test.login_type", "u", test_field="y") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@user:bz", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} @@ -427,7 +428,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): ("@ MALFORMED! :bz", None) ) channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@ MALFORMED! :bz", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( " USER🙂NAME ", "test.login_type", {"test_field": " abc "} @@ -510,7 +511,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): ("@user:bz", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@user:bz", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} @@ -549,7 +550,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() @override_config( @@ -584,7 +585,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() @override_config( @@ -615,7 +616,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_password.assert_not_called() @@ -646,13 +647,13 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): ("@localuser:test", None) ) channel = self._send_login("test.login_type", "localuser", test_field="") - 
self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) tok1 = channel.json_body["access_token"] channel = self._send_login( "test.login_type", "localuser", test_field="", device_id="dev2" ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # make the initial request which returns a 401 channel = self._delete_device(tok1, "dev2") @@ -721,7 +722,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # password login shouldn't work and should be rejected with a 400 # ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) def test_on_logged_out(self): """Tests that the on_logged_out callback is called when the user logs out.""" @@ -884,7 +885,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): }, access_token=tok, ) - self.assertEqual(channel.code, 403, channel.result) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.THREEPID_DENIED, @@ -906,7 +907,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): }, access_token=tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertIn("sid", channel.json_body) m.assert_called_once_with("email", "bar@test.com", registration) @@ -949,12 +950,12 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): "register", {"auth": {"session": session, "type": LoginType.DUMMY}}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body def _get_login_flows(self) -> JsonDict: channel = self.make_request("GET", "/_matrix/client/r0/login") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) return channel.json_body["flows"] def _send_password_login(self, user: str, password: str) -> FakeChannel: diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 97693cd1e2..12db68d564 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1379,7 +1379,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -1434,7 +1434,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -1512,7 +1512,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content={"password": "abc123", "admin": False}, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) 
self.assertFalse(channel.json_body["admin"]) @@ -1550,7 +1550,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): ) # Admin user is not blocked by mau anymore - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertFalse(channel.json_body["admin"]) @@ -1585,7 +1585,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) @@ -1626,7 +1626,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) @@ -1666,7 +1666,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("msisdn", channel.json_body["threepids"][0]["medium"]) self.assertEqual("1234567890", channel.json_body["threepids"][0]["address"]) @@ -2407,7 +2407,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content={"password": "abc123"}, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("bob", channel.json_body["displayname"]) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 1f9b65351e..6d6a26b8f4 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -15,6 +15,7 @@ import json import os import re from email.parser import Parser +from http import HTTPStatus from typing import Any, Dict, List, Optional, Union from unittest.mock import Mock @@ -98,7 +99,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8") ) - self.assertEqual(channel.code, 403, channel.result) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) def test_basic_password_reset(self) -> None: """Test basic password reset flow""" @@ -347,7 +348,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): shorthand=False, ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # Now POST to the same endpoint, mimicking the same behaviour as clicking the # password reset confirm button @@ -362,7 +363,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): shorthand=False, content_is_form=True, ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" @@ -390,7 +391,7 @@ class 
PasswordResetTestCase(unittest.HomeserverTestCase): new_password: str, session_id: str, client_secret: str, - expected_code: int = 200, + expected_code: int = HTTPStatus.OK, ) -> None: channel = self.make_request( "POST", @@ -715,7 +716,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -725,7 +728,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) def test_delete_email(self) -> None: @@ -747,7 +750,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ -756,7 +759,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) def test_delete_email_if_disabled(self) -> None: @@ -781,7 +784,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -791,7 +796,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) @@ -817,7 +822,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -827,7 +834,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) def test_no_valid_token(self) -> None: @@ -852,7 +859,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -862,7 +871,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - 
self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @override_config({"next_link_domain_whitelist": None}) @@ -872,7 +881,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): "something@example.com", "some_secret", next_link="https://example.com/a/good/site", - expect_code=200, + expect_code=HTTPStatus.OK, ) @override_config({"next_link_domain_whitelist": None}) @@ -884,7 +893,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): "something@example.com", "some_secret", next_link="some-protocol://abcdefghijklmopqrstuvwxyz", - expect_code=200, + expect_code=HTTPStatus.OK, ) @override_config({"next_link_domain_whitelist": None}) @@ -895,7 +904,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): "something@example.com", "some_secret", next_link="file:///host/path", - expect_code=400, + expect_code=HTTPStatus.BAD_REQUEST, ) @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) @@ -907,28 +916,28 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): "something@example.com", "some_secret", next_link=None, - expect_code=200, + expect_code=HTTPStatus.OK, ) self._request_token( "something@example.com", "some_secret", next_link="https://example.com/some/good/page", - expect_code=200, + expect_code=HTTPStatus.OK, ) self._request_token( "something@example.com", "some_secret", next_link="https://example.org/some/also/good/page", - expect_code=200, + expect_code=HTTPStatus.OK, ) self._request_token( "something@example.com", "some_secret", next_link="https://bad.example.org/some/bad/page", - expect_code=400, + expect_code=HTTPStatus.BAD_REQUEST, ) @override_config({"next_link_domain_whitelist": []}) @@ -940,7 +949,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): "something@example.com", "some_secret", next_link="https://example.com/a/page", - expect_code=400, + expect_code=HTTPStatus.BAD_REQUEST, ) def _request_token( @@ -948,7 +957,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): email: str, client_secret: str, next_link: Optional[str] = None, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, ) -> Optional[str]: """Request a validation token to add an email address to a user's account @@ -993,7 +1002,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): b"account/3pid/email/requestToken", {"client_secret": client_secret, "email": email, "send_attempt": 1}, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(expected_errcode, channel.json_body["errcode"]) self.assertEqual(expected_error, channel.json_body["error"]) @@ -1002,7 +1013,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): path = link.replace("https://example.com", "") channel = self.make_request("GET", path, shorthand=False) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" @@ -1052,7 +1063,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ 
-1061,7 +1072,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) threepids = {threepid["address"] for threepid in channel.json_body["threepids"]} @@ -1092,7 +1103,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): """Tests that not providing any MXID raises an error.""" self._test_status( users=None, - expected_status_code=400, + expected_status_code=HTTPStatus.BAD_REQUEST, expected_errcode=Codes.MISSING_PARAM, ) @@ -1100,7 +1111,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): """Tests that providing an invalid MXID raises an error.""" self._test_status( users=["bad:test"], - expected_status_code=400, + expected_status_code=HTTPStatus.BAD_REQUEST, expected_errcode=Codes.INVALID_PARAM, ) @@ -1286,7 +1297,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): def _test_status( self, users: Optional[List[str]], - expected_status_code: int = 200, + expected_status_code: int = HTTPStatus.OK, expected_statuses: Optional[Dict[str, Dict[str, bool]]] = None, expected_failures: Optional[List[str]] = None, expected_errcode: Optional[str] = None, diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index f6efa5fe37..e7f5517e34 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -14,6 +14,7 @@ import json import time import urllib.parse +from http import HTTPStatus from typing import Any, Dict, List, Optional from unittest.mock import Mock from urllib.parse import urlencode @@ -261,20 +262,20 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) access_token = channel.json_body["access_token"] device_id = channel.json_body["device_id"] # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # time passes self.reactor.advance(24 * 3600) # ... 
and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -288,7 +289,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # more requests with the expired token should still return a soft-logout self.reactor.advance(3600) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -296,7 +297,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): self._delete_device(access_token_2, "kermit", "monkey", device_id) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], False) @@ -307,7 +308,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request( b"DELETE", "devices/" + device_id, access_token=access_token ) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) # check it's a UI-Auth fail self.assertEqual( set(channel.json_body.keys()), @@ -330,7 +331,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): access_token=access_token, content={"auth": auth}, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) @override_config({"session_lifetime": "24h"}) def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: @@ -341,14 +342,14 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -367,14 +368,14 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # time passes self.reactor.advance(24 * 3600) # ... 
and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -466,7 +467,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def test_get_login_flows(self) -> None: """GET /login should return password and SSO flows""" channel = self.make_request("GET", "/_matrix/client/r0/login") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) expected_flow_types = [ "m.login.cas", @@ -494,14 +495,14 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): """/login/sso/redirect should redirect to an identity picker""" # first hit the redirect url, which should redirect to our idp picker channel = self._make_sso_redirect_request(None) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers uri = location_headers[0] # hitting that picker should give us some HTML channel = self.make_request("GET", uri) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # parse the form to check it has fields assumed elsewhere in this class html = channel.result["body"].decode("utf-8") @@ -530,7 +531,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): + "&idp=cas", shorthand=False, ) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers cas_uri = location_headers[0] @@ -555,7 +556,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=saml", ) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers saml_uri = location_headers[0] @@ -579,7 +580,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=oidc", ) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers oidc_uri = location_headers[0] @@ -606,7 +607,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"}) # that should serve a confirmation page - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) content_type_headers = channel.headers.getRawHeaders("Content-Type") assert content_type_headers self.assertTrue(content_type_headers[-1].startswith("text/html")) @@ -634,7 +635,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): "/login", content={"type": "m.login.token", "token": login_token}, ) - self.assertEqual(chan.code, 200, chan.result) + self.assertEqual(chan.code, HTTPStatus.OK, chan.result) self.assertEqual(chan.json_body["user_id"], "@user1:test") def test_multi_sso_redirect_to_unknown(self) -> None: @@ -643,18 +644,18 @@ class 
MultiSSOTestCase(unittest.HomeserverTestCase): "GET", "/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz", ) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) def test_client_idp_redirect_to_unknown(self) -> None: """If the client tries to pick an unknown IdP, return a 404""" channel = self._make_sso_redirect_request("xxx") - self.assertEqual(channel.code, 404, channel.result) + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") def test_client_idp_redirect_to_oidc(self) -> None: """If the client pick a known IdP, redirect to it""" channel = self._make_sso_redirect_request("oidc") - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers oidc_uri = location_headers[0] @@ -765,7 +766,7 @@ class CASTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", cas_ticket_url) # Test that the response is HTML. - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) content_type_header_value = "" for header in channel.result.get("headers", []): if header[0] == b"Content-Type": @@ -1246,7 +1247,7 @@ class UsernamePickerTestCase(HomeserverTestCase): ) # that should redirect to the username picker - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers picker_url = location_headers[0] @@ -1290,7 +1291,7 @@ class UsernamePickerTestCase(HomeserverTestCase): ("Content-Length", str(len(content))), ], ) - self.assertEqual(chan.code, 302, chan.result) + self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result) location_headers = chan.headers.getRawHeaders("Location") assert location_headers @@ -1300,7 +1301,7 @@ class UsernamePickerTestCase(HomeserverTestCase): path=location_headers[0], custom_headers=[("Cookie", "username_mapping_session=" + session_id)], ) - self.assertEqual(chan.code, 302, chan.result) + self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result) location_headers = chan.headers.getRawHeaders("Location") assert location_headers @@ -1325,5 +1326,5 @@ class UsernamePickerTestCase(HomeserverTestCase): "/login", content={"type": "m.login.token", "token": login_token}, ) - self.assertEqual(chan.code, 200, chan.result) + self.assertEqual(chan.code, HTTPStatus.OK, chan.result) self.assertEqual(chan.json_body["user_id"], "@bobby:test") diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 06221b806a..4c6b3decd8 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,6 +18,7 @@ """Tests REST events for /rooms paths.""" import json +from http import HTTPStatus from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from unittest.mock import Mock, call from urllib import parse as urlparse @@ -104,7 +105,7 @@ class RoomPermissionsTestCase(RoomBase): channel = self.make_request( "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # set topic for public room channel = self.make_request( @@ -112,7 +113,7 @@ class RoomPermissionsTestCase(RoomBase): 
("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode("ascii"), b'{"topic":"Public Room Topic"}', ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # auth as user_id now self.helper.auth_user_id = self.user_id @@ -134,28 +135,28 @@ class RoomPermissionsTestCase(RoomBase): "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), msg_content, ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # send message in created room not joined (no state), expect 403 channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # send message in created room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # send message in created room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # send message in created room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_topic_perms(self) -> None: topic_content = b'{"topic":"My Topic Name"}' @@ -165,28 +166,28 @@ class RoomPermissionsTestCase(RoomBase): channel = self.make_request( "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room not joined, expect 403 channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", topic_path) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set topic in created PRIVATE room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # get topic in created PRIVATE room and invited, expect 403 channel = self.make_request("GET", topic_path) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, 
channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) @@ -194,25 +195,25 @@ class RoomPermissionsTestCase(RoomBase): # Only room ops can set topic by default self.helper.auth_user_id = self.rmcreator_id channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.helper.auth_user_id = self.user_id channel = self.make_request("GET", topic_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(topic_content.decode("utf8")), channel.json_body) # set/get topic in created PRIVATE room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", topic_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # get topic in PUBLIC room, not joined, expect 403 channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set topic in PUBLIC room, not joined, expect 403 channel = self.make_request( @@ -220,7 +221,7 @@ class RoomPermissionsTestCase(RoomBase): "/rooms/%s/state/m.room.topic" % self.created_public_rmid, topic_content, ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def _test_get_membership( self, room: str, members: Iterable = frozenset(), expect_code: int = 200 @@ -309,14 +310,14 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=self.rmcreator_id, membership=Membership.JOIN, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) self.helper.change_membership( room=room, src=self.user_id, targ=self.rmcreator_id, membership=Membership.LEAVE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) def test_joined_permissions(self) -> None: @@ -342,7 +343,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=other, membership=Membership.JOIN, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # set left of other, expect 403 @@ -351,7 +352,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=other, membership=Membership.LEAVE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # set left of self, expect 200 @@ -371,7 +372,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=usr, membership=Membership.INVITE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) self.helper.change_membership( @@ -379,7 +380,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=usr, membership=Membership.JOIN, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # It is always valid to LEAVE if you've already left (currently.) 
@@ -388,7 +389,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=self.rmcreator_id, membership=Membership.LEAVE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # tests the "from banned" line from the table in https://spec.matrix.org/unstable/client-server-api/#mroommember @@ -405,7 +406,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=other, membership=Membership.BAN, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.FORBIDDEN, ) @@ -415,7 +416,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.rmcreator_id, targ=other, membership=Membership.BAN, - expect_code=200, + expect_code=HTTPStatus.OK, ) # from ban to invite: Must never happen. @@ -424,7 +425,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.rmcreator_id, targ=other, membership=Membership.INVITE, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.BAD_STATE, ) @@ -434,7 +435,7 @@ class RoomPermissionsTestCase(RoomBase): src=other, targ=other, membership=Membership.JOIN, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.BAD_STATE, ) @@ -444,7 +445,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.rmcreator_id, targ=other, membership=Membership.BAN, - expect_code=200, + expect_code=HTTPStatus.OK, ) # from ban to knock: Must never happen. @@ -453,7 +454,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.rmcreator_id, targ=other, membership=Membership.KNOCK, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.BAD_STATE, ) @@ -463,7 +464,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.user_id, targ=other, membership=Membership.LEAVE, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.FORBIDDEN, ) @@ -473,7 +474,7 @@ class RoomPermissionsTestCase(RoomBase): src=self.rmcreator_id, targ=other, membership=Membership.LEAVE, - expect_code=200, + expect_code=HTTPStatus.OK, ) @@ -493,7 +494,7 @@ class RoomStateTestCase(RoomBase): "/rooms/%s/state" % room_id, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertCountEqual( [state_event["type"] for state_event in channel.json_body], { @@ -516,7 +517,7 @@ class RoomStateTestCase(RoomBase): "/rooms/%s/state/m.room.member/%s" % (room_id, self.user_id), ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(channel.json_body, {"membership": "join"}) @@ -530,16 +531,16 @@ class RoomsMemberListTestCase(RoomBase): def test_get_member_list(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) def test_get_member_list_no_room(self) -> None: channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission(self) -> None: room_id = self.helper.create_room_as("@some_other_guy:red") channel = self.make_request("GET", "/rooms/%s/members" % 
room_id) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_with_at_token(self) -> None: """ @@ -550,7 +551,7 @@ class RoomsMemberListTestCase(RoomBase): # first sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] # check that permission is denied for @sid1:red to get the @@ -559,7 +560,7 @@ class RoomsMemberListTestCase(RoomBase): "GET", f"/rooms/{room_id}/members?at={sync_token}", ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_former_member(self) -> None: """ @@ -572,14 +573,14 @@ class RoomsMemberListTestCase(RoomBase): # check that the user can see the member list to start with channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # ban the user self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban") # check the user can no longer see the member list channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_former_member_with_at_token(self) -> None: """ @@ -593,14 +594,14 @@ class RoomsMemberListTestCase(RoomBase): # sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] # check that the user can see the member list to start with channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) @@ -612,14 +613,14 @@ class RoomsMemberListTestCase(RoomBase): # now, with the original user, sync again to get a new at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] # check the user can no longer see the updated member list channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_mixed_memberships(self) -> None: room_creator = "@some_other_guy:red" @@ -628,17 +629,17 @@ class RoomsMemberListTestCase(RoomBase): self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) # can't see list if you're just invited. 
channel = self.make_request("GET", room_path) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.helper.join(room=room_id, user=self.user_id) # can see list now joined channel = self.make_request("GET", room_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.helper.leave(room=room_id, user=self.user_id) # can see old list once left channel = self.make_request("GET", room_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) def test_get_member_list_cancellation(self) -> None: """Test cancellation of a `/rooms/$room_id/members` request.""" @@ -651,7 +652,7 @@ class RoomsMemberListTestCase(RoomBase): "/rooms/%s/members" % room_id, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertLessEqual( { @@ -671,7 +672,7 @@ class RoomsMemberListTestCase(RoomBase): # first sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] channel = make_request_with_cancellation_test( @@ -682,7 +683,7 @@ class RoomsMemberListTestCase(RoomBase): "/rooms/%s/members?at=%s" % (room_id, sync_token), ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertLessEqual( { @@ -706,7 +707,7 @@ class RoomsCreateTestCase(RoomBase): # POST with no config keys, expect new room id channel = self.make_request("POST", "/createRoom", "{}") - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None self.assertEqual(36, channel.resource_usage.db_txn_count) @@ -719,7 +720,7 @@ class RoomsCreateTestCase(RoomBase): b'{"initial_state":[{"type": "m.bridge", "content": {}}]}', ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None self.assertEqual(40, channel.resource_usage.db_txn_count) @@ -727,13 +728,13 @@ class RoomsCreateTestCase(RoomBase): def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id channel = self.make_request("POST", "/createRoom", b'{"visibility":"private"}') - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_custom_key(self) -> None: # POST with custom config keys, expect new room id channel = self.make_request("POST", "/createRoom", b'{"custom":"stuff"}') - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_known_and_unknown_keys(self) -> None: @@ -741,16 +742,16 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' ) - self.assertEqual(200, channel.code) + 
self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_invalid_content(self) -> None: # POST with invalid content / paths, expect 400 channel = self.make_request("POST", "/createRoom", b'{"visibili') - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) channel = self.make_request("POST", "/createRoom", b'["hello"]') - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) def test_post_room_invitees_invalid_mxid(self) -> None: # POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088 @@ -758,7 +759,7 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request( "POST", "/createRoom", b'{"invite":["@alice:example.com "]}' ) - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) def test_post_room_invitees_ratelimit(self) -> None: @@ -782,7 +783,7 @@ class RoomsCreateTestCase(RoomBase): # Test that the invites are correctly ratelimited. channel = self.make_request("POST", "/createRoom", content) - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) self.assertEqual( "Cannot invite so many users at once", channel.json_body["error"], @@ -795,7 +796,7 @@ class RoomsCreateTestCase(RoomBase): # Test that the invites aren't ratelimited anymore. channel = self.make_request("POST", "/createRoom", content) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed @@ -819,7 +820,7 @@ class RoomsCreateTestCase(RoomBase): "/createRoom", {}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) @@ -845,7 +846,7 @@ class RoomsCreateTestCase(RoomBase): "/createRoom", {}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) @@ -865,7 +866,7 @@ class RoomsCreateTestCase(RoomBase): "/createRoom", {}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) @@ -882,54 +883,68 @@ class RoomTopicTestCase(RoomBase): def test_invalid_puts(self) -> None: # missing keys or invalid json channel = self.make_request("PUT", self.path, "{}") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, '{"_name":"bo"}') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, '{"nao') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request( "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]' ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", 
self.path, "text only") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, "") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) # valid key, wrong type content = '{"topic":["Topic name"]}' channel = self.make_request("PUT", self.path, content) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) def test_rooms_topic(self) -> None: # nothing should be there channel = self.make_request("GET", self.path) - self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.result["body"]) # valid put content = '{"topic":"Topic name"}' channel = self.make_request("PUT", self.path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # valid get channel = self.make_request("GET", self.path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) def test_rooms_topic_with_extra_keys(self) -> None: # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' channel = self.make_request("PUT", self.path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # valid get channel = self.make_request("GET", self.path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) @@ -945,22 +960,34 @@ class RoomMemberStateTestCase(RoomBase): path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json channel = self.make_request("PUT", path, "{}") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, '{"_name":"bo"}') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, '{"nao') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, "text only") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, "") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) # valid keys, wrong types content = '{"membership":["%s","%s","%s"]}' % ( @@ -969,7 +996,9 @@ class 
RoomMemberStateTestCase(RoomBase): Membership.LEAVE, ) channel = self.make_request("PUT", path, content.encode("ascii")) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) def test_rooms_members_self(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % ( @@ -980,10 +1009,10 @@ class RoomMemberStateTestCase(RoomBase): # valid join message (NOOP since we made the room) content = '{"membership":"%s"}' % Membership.JOIN channel = self.make_request("PUT", path, content.encode("ascii")) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, content=b"") - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} self.assertEqual(expected_response, channel.json_body) @@ -998,10 +1027,10 @@ class RoomMemberStateTestCase(RoomBase): # valid invite message content = '{"membership":"%s"}' % Membership.INVITE channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, content=b"") - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) def test_rooms_members_other_custom_keys(self) -> None: @@ -1017,10 +1046,10 @@ class RoomMemberStateTestCase(RoomBase): "Join us!", ) channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, content=b"") - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) @@ -1137,7 +1166,9 @@ class RoomJoinTestCase(RoomBase): # Now make the callback deny all room joins, and check that a join actually fails. return_value = False - self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + self.helper.join( + self.room3, self.user2, expect_code=HTTPStatus.FORBIDDEN, tok=self.tok2 + ) def test_spam_checker_may_join_room(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called @@ -1205,7 +1236,7 @@ class RoomJoinTestCase(RoomBase): self.helper.join( self.room3, self.user2, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, expect_errcode=return_value, tok=self.tok2, ) @@ -1216,7 +1247,7 @@ class RoomJoinTestCase(RoomBase): self.helper.join( self.room3, self.user2, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, expect_errcode=return_value[0], tok=self.tok2, expect_additional_fields=return_value[1], @@ -1270,7 +1301,7 @@ class RoomJoinRatelimitTestCase(RoomBase): # Update the display name for the user. path = "/_matrix/client/r0/profile/%s/displayname" % self.user_id channel = self.make_request("PUT", path, {"displayname": "John Doe"}) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Check that all the rooms have been sent a profile update into. 
for room_id in room_ids: @@ -1335,71 +1366,93 @@ class RoomMessagesTestCase(RoomBase): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json channel = self.make_request("PUT", path, b"{}") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'{"_name":"bo"}') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'{"nao') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b"text only") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b"") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) def test_rooms_messages_sent(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) content = b'{"body":"test","msgtype":{"type":"a"}}' channel = self.make_request("PUT", path, content) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) # custom message types content = b'{"body":"test","msgtype":"test.custom.text"}' channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # m.text message type path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id)) content = b'{"body":"test2","msgtype":"m.text"}' channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) @parameterized.expand( [ # Allow param( - name="NOT_SPAM", value="NOT_SPAM", expected_code=200, expected_fields={} + name="NOT_SPAM", + value="NOT_SPAM", + expected_code=HTTPStatus.OK, + expected_fields={}, + ), + param( + name="False", + value=False, + expected_code=HTTPStatus.OK, + expected_fields={}, ), - param(name="False", value=False, expected_code=200, expected_fields={}), # Block param( name="scalene string", value="ANY OTHER STRING", - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={"errcode": "M_FORBIDDEN"}, ), param( name="True", value=True, - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={"errcode": "M_FORBIDDEN"}, ), param( name="Code", value=Codes.LIMIT_EXCEEDED, - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={"errcode": "M_LIMIT_EXCEEDED"}, ), param( name="Tuple", value=(Codes.SERVER_NOT_TRUSTED, {"additional_field": "12345"}), - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={ "errcode": "M_SERVER_NOT_TRUSTED", "additional_field": "12345", @@ 
-1584,7 +1637,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel = self.make_request("PUT", path, "{}") # Then I am allowed - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) def test_normal_user_can_not_post_state_event(self) -> None: # Given I am a normal member of a room @@ -1598,7 +1651,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel = self.make_request("PUT", path, "{}") # Then I am not allowed because state events require PL>=50 - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.assertEqual( "You don't have permission to post that to the room. " "user_level (0) < send_level (50)", @@ -1625,7 +1678,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel = self.make_request("PUT", path, "{}") # Then I am allowed - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) @unittest.override_config( { @@ -1653,7 +1706,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel = self.make_request("PUT", path, "{}") # Then I am not allowed - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) @unittest.override_config( { @@ -1681,7 +1734,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel = self.make_request("PUT", path, "{}") # Then I am not allowed - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.assertEqual( "You don't have permission to post that to the room. " + "user_level (0) < send_level (1)", @@ -1712,7 +1765,7 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): # Then I am not allowed because the public_chat config does not # affect this room, because this room is a private_chat - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.assertEqual( "You don't have permission to post that to the room. 
" + "user_level (0) < send_level (50)", @@ -1731,7 +1784,7 @@ class RoomInitialSyncTestCase(RoomBase): def test_initial_sync(self) -> None: channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertEqual(self.room_id, channel.json_body["room_id"]) self.assertEqual("join", channel.json_body["membership"]) @@ -1774,7 +1827,7 @@ class RoomMessageListTestCase(RoomBase): channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("start" in channel.json_body) self.assertEqual(token, channel.json_body["start"]) self.assertTrue("chunk" in channel.json_body) @@ -1785,7 +1838,7 @@ class RoomMessageListTestCase(RoomBase): channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("start" in channel.json_body) self.assertEqual(token, channel.json_body["start"]) self.assertTrue("chunk" in channel.json_body) @@ -1824,7 +1877,7 @@ class RoomMessageListTestCase(RoomBase): json.dumps({"types": [EventTypes.Message]}), ), ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) chunk = channel.json_body["chunk"] self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) @@ -1852,7 +1905,7 @@ class RoomMessageListTestCase(RoomBase): json.dumps({"types": [EventTypes.Message]}), ), ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) chunk = channel.json_body["chunk"] self.assertEqual(len(chunk), 1, [event["content"] for event in chunk]) @@ -1869,7 +1922,7 @@ class RoomMessageListTestCase(RoomBase): json.dumps({"types": [EventTypes.Message]}), ), ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) chunk = channel.json_body["chunk"] self.assertEqual(len(chunk), 0, [event["content"] for event in chunk]) @@ -1997,14 +2050,14 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): def test_restricted_no_auth(self) -> None: channel = self.make_request("GET", self.url) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) def test_restricted_auth(self) -> None: self.register_user("user", "pass") tok = self.login("user", "pass") channel = self.make_request("GET", self.url, access_token=tok) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): @@ -2123,7 +2176,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): content={"filter": search_filter}, access_token=self.token, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.federation_client.get_public_rooms.assert_called_once_with( # type: ignore[attr-defined] "testserv", @@ -2140,7 +2193,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): # The `get_public_rooms` should be called again if the first call fails # with a 404, when using search filters. 
self.federation_client.get_public_rooms.side_effect = ( # type: ignore[attr-defined] - HttpResponseException(404, "Not Found", b""), + HttpResponseException(HTTPStatus.NOT_FOUND, "Not Found", b""), make_awaitable({}), ) @@ -2152,7 +2205,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): content={"filter": search_filter}, access_token=self.token, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.federation_client.get_public_rooms.assert_has_calls( # type: ignore[attr-defined] [ @@ -2206,7 +2259,7 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): request_data, access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) @@ -2220,7 +2273,7 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): request_data, access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) event_id = channel.json_body["event_id"] channel = self.make_request( @@ -2228,7 +2281,7 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) res_displayname = channel.json_body["content"]["displayname"] self.assertEqual(res_displayname, self.displayname, channel.result) @@ -2262,7 +2315,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2276,7 +2329,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2290,7 +2343,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason, "user_id": self.second_user_id}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2304,7 +2357,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason, "user_id": self.second_user_id}, access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2316,7 +2369,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason, "user_id": self.second_user_id}, access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2328,7 +2381,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason, "user_id": self.second_user_id}, access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) 
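Why this whole sweep of assertion changes is behaviour-preserving: `http.HTTPStatus` is an `IntEnum`, so its members compare equal to the bare integer literals they replace; only the readability of the assertions (and of their failure output) improves. A quick standard-library check:

    from http import HTTPStatus

    assert HTTPStatus.OK == 200
    assert HTTPStatus.UNAUTHORIZED == 401
    assert HTTPStatus.FORBIDDEN == 403
    assert HTTPStatus.NOT_FOUND == 404
    assert HTTPStatus.BAD_REQUEST == 400
    # Members also carry their reason phrase, which reads well in test output:
    assert HTTPStatus.FORBIDDEN.phrase == "Forbidden"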
self._check_for_reason(reason) @@ -2347,7 +2400,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): content={"reason": reason}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2359,7 +2412,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): ), access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) event_content = channel.json_body @@ -2407,7 +2460,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): % (self.room_id, event_id, json.dumps(self.FILTER_LABELS)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2437,7 +2490,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2472,7 +2525,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): % (self.room_id, event_id, json.dumps(self.FILTER_LABELS_NOT_LABELS)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2820,7 +2873,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "/rooms/%s/messages?filter=%s&dir=b" % (self.room_id, json.dumps(filter)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) return channel.json_body["chunk"] @@ -2925,7 +2978,7 @@ class ContextTestCase(unittest.HomeserverTestCase): % (self.room_id, event_id), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2991,7 +3044,7 @@ class ContextTestCase(unittest.HomeserverTestCase): % (self.room_id, event_id), access_token=invited_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] -- cgit 1.4.1 From 5d4028f217f178fcd384d5bfddd92225b4e78c51 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Sun, 17 Jul 2022 23:19:43 +0200 Subject: Make all `process_replication_rows` methods async (#13304) More prep work for asynchronous caching; this also makes all `process_replication_rows` methods consistent (the presence handler's already is).
Signed off by Nick @ Beeper (@Fizzadar) --- changelog.d/13304.misc | 1 + synapse/handlers/typing.py | 4 ++-- synapse/replication/slave/storage/devices.py | 6 ++++-- synapse/replication/slave/storage/push_rule.py | 6 ++++-- synapse/replication/slave/storage/pushers.py | 6 ++++-- synapse/replication/tcp/client.py | 6 ++++-- synapse/storage/_base.py | 2 +- synapse/storage/databases/main/account_data.py | 4 ++-- synapse/storage/databases/main/cache.py | 4 ++-- synapse/storage/databases/main/deviceinbox.py | 6 ++++-- synapse/storage/databases/main/events_worker.py | 4 ++-- synapse/storage/databases/main/presence.py | 6 ++++-- synapse/storage/databases/main/receipts.py | 6 ++++-- synapse/storage/databases/main/tags.py | 4 ++-- 14 files changed, 40 insertions(+), 25 deletions(-) create mode 100644 changelog.d/13304.misc diff --git a/changelog.d/13304.misc b/changelog.d/13304.misc new file mode 100644 index 0000000000..156d3d71d7 --- /dev/null +++ b/changelog.d/13304.misc @@ -0,0 +1 @@ +Make all replication row processing methods asynchronous. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index d104ea07fe..26edeeca3c 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -158,7 +158,7 @@ class FollowerTypingHandler: except Exception: logger.exception("Error pushing typing notif to remotes") - def process_replication_rows( + async def process_replication_rows( self, token: int, rows: List[TypingStream.TypingStreamRow] ) -> None: """Should be called whenever we receive updates for typing stream.""" @@ -444,7 +444,7 @@ class TypingWriterHandler(FollowerTypingHandler): return rows, current_id, limited - def process_replication_rows( + async def process_replication_rows( self, token: int, rows: List[TypingStream.TypingStreamRow] ) -> None: # The writing process should never get updates from replication. 
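The shape of the refactor, as a hedged sketch (illustrative class names, not the exact Synapse hierarchy): every override in the store MRO becomes a coroutine, and each one awaits its `super()` call so cooperative multiple inheritance keeps working exactly as it did in the synchronous version.

    from typing import Any, Iterable

    class BaseStore:
        async def process_replication_rows(
            self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
        ) -> None:
            """Base hook; subclasses cooperate by awaiting super()."""

    class ExampleWorkerStore(BaseStore):
        async def process_replication_rows(
            self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
        ) -> None:
            # Per-store work (advance ID generators, invalidate caches) happens
            # here, then control passes up the MRO as in the old sync version.
            return await super().process_replication_rows(
                stream_name, instance_name, token, rows
            )

Awaiting the chain end-to-end is what lets `ReplicationDataHandler` issue a single `await self.store.process_replication_rows(...)` regardless of which store mixins a concrete class composes.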
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index a48cc02069..22f7999721 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -49,7 +49,7 @@ class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == DeviceListsStream.NAME: @@ -59,7 +59,9 @@ class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): self._device_list_id_gen.advance(instance_name, token) for row in rows: self._user_signature_stream_cache.entity_has_changed(row.user_id, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) + return await super().process_replication_rows( + stream_name, instance_name, token, rows + ) def _invalidate_caches_for_devices( self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow] diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index 52ee3f7e58..e1838a81a9 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -24,7 +24,7 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): def get_max_push_rules_stream_id(self) -> int: return self._push_rules_stream_id_gen.get_current_token() - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == PushRulesStream.NAME: @@ -33,4 +33,6 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): self.get_push_rules_for_user.invalidate((row.user_id,)) self.get_push_rules_enabled_for_user.invalidate((row.user_id,)) self.push_rules_stream_cache.entity_has_changed(row.user_id, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) + return await super().process_replication_rows( + stream_name, instance_name, token, rows + ) diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index de642bba71..fb3f5653af 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -40,9 +40,11 @@ class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): def get_pushers_stream_token(self) -> int: return self._pushers_id_gen.get_current_token() - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == PushersStream.NAME: self._pushers_id_gen.advance(instance_name, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) + return await super().process_replication_rows( + stream_name, instance_name, token, rows + ) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 2f59245058..f9722ccb4f 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -144,13 +144,15 @@ class ReplicationDataHandler: token: stream token for this batch of rows rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row. 
""" - self.store.process_replication_rows(stream_name, instance_name, token, rows) + await self.store.process_replication_rows( + stream_name, instance_name, token, rows + ) if self.send_handler: await self.send_handler.process_replication_rows(stream_name, token, rows) if stream_name == TypingStream.NAME: - self._typing_handler.process_replication_rows(token, rows) + await self._typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( StreamKeyType.TYPING, token, rooms=[row.room_id for row in rows] ) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b8c8dcd76b..822108e83b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -47,7 +47,7 @@ class SQLBaseStore(metaclass=ABCMeta): self.database_engine = database.engine self.db_pool = database - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 9af9f4f18e..337b22294e 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -414,7 +414,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) ) - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, @@ -437,7 +437,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) self._account_data_stream_cache.entity_has_changed(row.user_id, token) - super().process_replication_rows(stream_name, instance_name, token, rows) + await super().process_replication_rows(stream_name, instance_name, token, rows) async def add_account_data_to_room( self, user_id: str, room_id: str, account_data_type: str, content: JsonDict diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 2367ddeea3..048ff3e1b7 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -119,7 +119,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_all_updated_caches", get_all_updated_caches_txn ) - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == EventsStream.NAME: @@ -154,7 +154,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): else: self._attempt_to_invalidate_cache(row.cache_func, row.keys) - super().process_replication_rows(stream_name, instance_name, token, rows) + await super().process_replication_rows(stream_name, instance_name, token, rows) def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None: data = row.data diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 422e0e65ca..45fe58c104 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -128,7 +128,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): prefilled_cache=device_outbox_prefill, ) - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, @@ -148,7 +148,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): self._device_federation_outbox_stream_cache.entity_has_changed( row.entity, token ) - return super().process_replication_rows(stream_name, instance_name, token, rows) + return await super().process_replication_rows( + stream_name, instance_name, 
token, rows + ) def get_to_device_stream_token(self) -> int: return self._device_inbox_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index f3935bfead..5310d4eda2 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -280,7 +280,7 @@ class EventsWorkerStore(SQLBaseStore): id_column="chain_id", ) - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, @@ -292,7 +292,7 @@ class EventsWorkerStore(SQLBaseStore): elif stream_name == BackfillStream.NAME: self._backfill_id_gen.advance(instance_name, -token) - super().process_replication_rows(stream_name, instance_name, token, rows) + await super().process_replication_rows(stream_name, instance_name, token, rows) async def have_censored_event(self, event_id: str) -> bool: """Check if an event has been censored, i.e. if the content of the event has been erased diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 9769a18a9d..9fe3124b35 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -431,7 +431,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) self._presence_on_startup = [] return active_on_startup - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, @@ -443,4 +443,6 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) for row in rows: self.presence_stream_cache.entity_has_changed(row.user_id, token) self._get_presence_for_user.invalidate((row.user_id,)) - return super().process_replication_rows(stream_name, instance_name, token, rows) + return await super().process_replication_rows( + stream_name, instance_name, token, rows + ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 0090c9f225..f85862d968 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -589,7 +589,7 @@ class ReceiptsWorkerStore(SQLBaseStore): "get_unread_event_push_actions_by_room_for_user", (room_id,) ) - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, @@ -604,7 +604,9 @@ class ReceiptsWorkerStore(SQLBaseStore): ) self._receipts_stream_cache.entity_has_changed(row.room_id, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) + return await super().process_replication_rows( + stream_name, instance_name, token, rows + ) def _insert_linearized_receipt_txn( self, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index b0f5de67a3..5e8905369c 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -292,7 +292,7 @@ class TagsWorkerStore(AccountDataWorkerStore): # than the id that the client has. 
pass - def process_replication_rows( + async def process_replication_rows( self, stream_name: str, instance_name: str, @@ -305,7 +305,7 @@ class TagsWorkerStore(AccountDataWorkerStore): self.get_tags_for_user.invalidate((row.user_id,)) self._account_data_stream_cache.entity_has_changed(row.user_id, token) - super().process_replication_rows(stream_name, instance_name, token, rows) + await super().process_replication_rows(stream_name, instance_name, token, rows) class TagsStore(TagsWorkerStore): -- cgit 1.4.1 From efee345b454ac5e6aeb4b4128793be1fbc308b91 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Sun, 17 Jul 2022 23:28:45 +0200 Subject: Remove unnecessary `json.dumps` from tests (#13303) --- changelog.d/13303.misc | 1 + tests/rest/client/test_account.py | 23 +++---- tests/rest/client/test_directory.py | 16 ++--- tests/rest/client/test_identity.py | 4 +- tests/rest/client/test_login.py | 3 +- tests/rest/client/test_password_policy.py | 31 ++++----- tests/rest/client/test_register.py | 103 +++++++++++++----------------- tests/rest/client/test_report_event.py | 7 +- tests/rest/client/test_rooms.py | 74 +++++++++------------ tests/rest/client/test_sync.py | 3 +- tests/rest/client/utils.py | 8 +-- tests/test_terms_auth.py | 39 +++++------ tests/unittest.py | 31 ++++----- 13 files changed, 143 insertions(+), 200 deletions(-) create mode 100644 changelog.d/13303.misc diff --git a/changelog.d/13303.misc b/changelog.d/13303.misc new file mode 100644 index 0000000000..03f64ab171 --- /dev/null +++ b/changelog.d/13303.misc @@ -0,0 +1 @@ +Remove unnecessary `json.dumps` from tests. \ No newline at end of file diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 6d6a26b8f4..7ae926dc9c 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import json import os import re from email.parser import Parser @@ -96,9 +95,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): """ body = {"type": "m.login.password", "user": username, "password": password} - channel = self.make_request( - "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8") - ) + channel = self.make_request("POST", "/_matrix/client/r0/login", body) self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) def test_basic_password_reset(self) -> None: @@ -480,16 +477,14 @@ class DeactivateTestCase(unittest.HomeserverTestCase): self.assertEqual(memberships[0].room_id, room_id, memberships) def deactivate(self, user_id: str, tok: str) -> None: - request_data = json.dumps( - { - "auth": { - "type": "m.login.password", - "user": user_id, - "password": "test", - }, - "erase": False, - } - ) + request_data = { + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "test", + }, + "erase": False, + } channel = self.make_request( "POST", "account/deactivate", request_data, access_token=tok ) diff --git a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py index 16e7ef41bc..7a88aa2cda 100644 --- a/tests/rest/client/test_directory.py +++ b/tests/rest/client/test_directory.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
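Why the `json.dumps` calls can simply go: judging from the `tests/rest/client/utils.py` hunks later in this patch, the test transport already serialises dict (and list) content itself, roughly along the lines of the hedged sketch below (a hypothetical helper, not the exact source; `encode_request_content` is an invented name):

    import json
    from typing import Optional, Union

    def encode_request_content(
        content: Optional[Union[bytes, str, dict, list]]
    ) -> Optional[bytes]:
        # Pass bytes through untouched; JSON-encode structured content.
        if content is None or isinstance(content, bytes):
            return content
        if isinstance(content, (dict, list)):
            content = json.dumps(content)
        return content.encode("utf8")

    assert encode_request_content({"username": "kermit"}) == b'{"username": "kermit"}'

Hence the tests can hand `make_request` a plain dict and drop the manual encoding.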
-import json from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor @@ -97,8 +96,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): # We use deliberately a localpart under the length threshold so # that we can make sure that the check is done on the whole alias. - data = {"room_alias_name": random_string(256 - len(self.hs.hostname))} - request_data = json.dumps(data) + request_data = {"room_alias_name": random_string(256 - len(self.hs.hostname))} channel = self.make_request( "POST", url, request_data, access_token=self.user_tok ) @@ -110,8 +108,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): # Check with an alias of allowed length. There should already be # a test that ensures it works in test_register.py, but let's be # as cautious as possible here. - data = {"room_alias_name": random_string(5)} - request_data = json.dumps(data) + request_data = {"room_alias_name": random_string(5)} channel = self.make_request( "POST", url, request_data, access_token=self.user_tok ) @@ -144,8 +141,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): # Add an alias for the room, as the appservice alias = RoomAlias(f"asns-{random_string(5)}", self.hs.hostname).to_string() - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", @@ -193,8 +189,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.hs.hostname, ) - data = {"aliases": [self.random_alias(alias_length)]} - request_data = json.dumps(data) + request_data = {"aliases": [self.random_alias(alias_length)]} channel = self.make_request( "PUT", url, request_data, access_token=self.user_tok @@ -206,8 +201,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): ) -> str: alias = self.random_alias(alias_length) url = "/_matrix/client/r0/directory/room/%s" % alias - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", url, request_data, access_token=self.user_tok diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index 299b9d21e2..dc17c9d113 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor @@ -51,12 +50,11 @@ class IdentityTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.OK, channel.result) room_id = channel.json_body["room_id"] - params = { + request_data = { "id_server": "testis", "medium": "email", "address": "test@example.com", } - request_data = json.dumps(params) request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") channel = self.make_request( b"POST", request_url, request_data, access_token=tok diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index e7f5517e34..a2958f6959 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import json import time import urllib.parse from http import HTTPStatus @@ -400,7 +399,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", "/_matrix/client/v3/login", - json.dumps(body).encode("utf8"), + body, custom_headers=None, ) diff --git a/tests/rest/client/test_password_policy.py b/tests/rest/client/test_password_policy.py index 3a74d2e96c..e19d21d6ee 100644 --- a/tests/rest/client/test_password_policy.py +++ b/tests/rest/client/test_password_policy.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor @@ -89,7 +88,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): ) def test_password_too_short(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "shorty"}) + request_data = {"username": "kermit", "password": "shorty"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -100,7 +99,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): ) def test_password_no_digit(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "longerpassword"}) + request_data = {"username": "kermit", "password": "longerpassword"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -111,7 +110,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): ) def test_password_no_symbol(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"}) + request_data = {"username": "kermit", "password": "l0ngerpassword"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -122,7 +121,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): ) def test_password_no_uppercase(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"}) + request_data = {"username": "kermit", "password": "l0ngerpassword!"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -133,7 +132,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): ) def test_password_no_lowercase(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"}) + request_data = {"username": "kermit", "password": "L0NGERPASSWORD!"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -144,7 +143,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): ) def test_password_compliant(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"}) + request_data = {"username": "kermit", "password": "L0ngerpassword!"} channel = self.make_request("POST", self.register_url, request_data) # Getting a 401 here means the password has passed validation and the server has @@ -161,16 +160,14 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): user_id = self.register_user("kermit", compliant_password) tok = self.login("kermit", compliant_password) - request_data = json.dumps( - { - "new_password": not_compliant_password, - "auth": { - "password": compliant_password, - "type": 
LoginType.PASSWORD, - "user": user_id, - }, - } - ) + request_data = { + "new_password": not_compliant_password, + "auth": { + "password": compliant_password, + "type": LoginType.PASSWORD, + "user": user_id, + }, + } channel = self.make_request( "POST", "/_matrix/client/r0/account/password", diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index cb27458746..071b488cc0 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import datetime -import json import os from typing import Any, Dict, List, Tuple @@ -62,9 +61,10 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) self.hs.get_datastores().main.services_cache.append(appservice) - request_data = json.dumps( - {"username": "as_user_kermit", "type": APP_SERVICE_REGISTRATION_TYPE} - ) + request_data = { + "username": "as_user_kermit", + "type": APP_SERVICE_REGISTRATION_TYPE, + } channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data @@ -85,7 +85,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) self.hs.get_datastores().main.services_cache.append(appservice) - request_data = json.dumps({"username": "as_user_kermit"}) + request_data = {"username": "as_user_kermit"} channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data @@ -95,9 +95,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): def test_POST_appservice_registration_invalid(self) -> None: self.appservice = None # no application service exists - request_data = json.dumps( - {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} - ) + request_data = {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) @@ -105,14 +103,14 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"401", channel.result) def test_POST_bad_password(self) -> None: - request_data = json.dumps({"username": "kermit", "password": 666}) + request_data = {"username": "kermit", "password": 666} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"400", channel.result) self.assertEqual(channel.json_body["error"], "Invalid password") def test_POST_bad_username(self) -> None: - request_data = json.dumps({"username": 777, "password": "monkey"}) + request_data = {"username": 777, "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"400", channel.result) @@ -121,13 +119,12 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): def test_POST_user_valid(self) -> None: user_id = "@kermit:test" device_id = "frogfone" - params = { + request_data = { "username": "kermit", "password": "monkey", "device_id": device_id, "auth": {"type": LoginType.DUMMY}, } - request_data = json.dumps(params) channel = self.make_request(b"POST", self.url, request_data) det_data = { @@ -140,7 +137,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): @override_config({"enable_registration": False}) def test_POST_disabled_registration(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "monkey"}) + request_data = {"username": "kermit", "password": "monkey"} self.auth_result = (None, 
{"username": "kermit", "password": "monkey"}, None) channel = self.make_request(b"POST", self.url, request_data) @@ -188,13 +185,12 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting(self) -> None: for i in range(0, 6): - params = { + request_data = { "username": "kermit" + str(i), "password": "monkey", "device_id": "frogfone", "auth": {"type": LoginType.DUMMY}, } - request_data = json.dumps(params) channel = self.make_request(b"POST", self.url, request_data) if i == 5: @@ -234,7 +230,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): } # Request without auth to get flows and session - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] # Synapse adds a dummy stage to differentiate flows where otherwise one @@ -251,8 +247,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "token": token, "session": session, } - request_data = json.dumps(params) - channel = self.make_request(b"POST", self.url, request_data) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) completed = channel.json_body["completed"] self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @@ -262,8 +257,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "type": LoginType.DUMMY, "session": session, } - request_data = json.dumps(params) - channel = self.make_request(b"POST", self.url, request_data) + channel = self.make_request(b"POST", self.url, params) det_data = { "user_id": f"@{username}:{self.hs.hostname}", "home_server": self.hs.hostname, @@ -290,7 +284,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "password": "monkey", } # Request without auth to get session - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) session = channel.json_body["session"] # Test with token param missing (invalid) @@ -298,21 +292,21 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "type": LoginType.REGISTRATION_TOKEN, "session": session, } - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM) self.assertEqual(channel.json_body["completed"], []) # Test with non-string (invalid) params["auth"]["token"] = 1234 - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) self.assertEqual(channel.json_body["completed"], []) # Test with unknown token (invalid) params["auth"]["token"] = "1234" - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -337,9 +331,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): params1: JsonDict = {"username": "bert", 
"password": "monkey"} params2: JsonDict = {"username": "ernie", "password": "monkey"} # Do 2 requests without auth to get two session IDs - channel1 = self.make_request(b"POST", self.url, json.dumps(params1)) + channel1 = self.make_request(b"POST", self.url, params1) session1 = channel1.json_body["session"] - channel2 = self.make_request(b"POST", self.url, json.dumps(params2)) + channel2 = self.make_request(b"POST", self.url, params2) session2 = channel2.json_body["session"] # Use token with session1 and check `pending` is 1 @@ -348,9 +342,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "token": token, "session": session1, } - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) # Repeat request to make sure pending isn't increased again - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) pending = self.get_success( store.db_pool.simple_select_one_onecol( "registration_tokens", @@ -366,14 +360,14 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "token": token, "session": session2, } - channel = self.make_request(b"POST", self.url, json.dumps(params2)) + channel = self.make_request(b"POST", self.url, params2) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) # Complete registration with session1 params1["auth"]["type"] = LoginType.DUMMY - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) # Check pending=0 and completed=1 res = self.get_success( store.db_pool.simple_select_one( @@ -386,7 +380,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEqual(res["completed"], 1) # Check auth still fails when using token with session2 - channel = self.make_request(b"POST", self.url, json.dumps(params2)) + channel = self.make_request(b"POST", self.url, params2) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -411,7 +405,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) params: JsonDict = {"username": "kermit", "password": "monkey"} # Request without auth to get session - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) session = channel.json_body["session"] # Check authentication fails with expired token @@ -420,7 +414,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "token": token, "session": session, } - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -435,7 +429,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) # Check authentication succeeds - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) completed = channel.json_body["completed"] self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @@ -460,9 +454,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): # Do 2 requests without auth to get two session IDs 
params1: JsonDict = {"username": "bert", "password": "monkey"} params2: JsonDict = {"username": "ernie", "password": "monkey"} - channel1 = self.make_request(b"POST", self.url, json.dumps(params1)) + channel1 = self.make_request(b"POST", self.url, params1) session1 = channel1.json_body["session"] - channel2 = self.make_request(b"POST", self.url, json.dumps(params2)) + channel2 = self.make_request(b"POST", self.url, params2) session2 = channel2.json_body["session"] # Use token with both sessions @@ -471,18 +465,18 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "token": token, "session": session1, } - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) params2["auth"] = { "type": LoginType.REGISTRATION_TOKEN, "token": token, "session": session2, } - self.make_request(b"POST", self.url, json.dumps(params2)) + self.make_request(b"POST", self.url, params2) # Complete registration with session1 params1["auth"]["type"] = LoginType.DUMMY - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) # Check `result` of registration token stage for session1 is `True` result1 = self.get_success( @@ -550,7 +544,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): # Do request without auth to get a session ID params: JsonDict = {"username": "kermit", "password": "monkey"} - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) session = channel.json_body["session"] # Use token @@ -559,7 +553,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "token": token, "session": session, } - self.make_request(b"POST", self.url, json.dumps(params)) + self.make_request(b"POST", self.url, params) # Delete token self.get_success( @@ -827,8 +821,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): admin_tok = self.login("admin", "adminpassword") url = "/_synapse/admin/v1/account_validity/validity" - params = {"user_id": user_id} - request_data = json.dumps(params) + request_data = {"user_id": user_id} channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) self.assertEqual(channel.result["code"], b"200", channel.result) @@ -845,12 +838,11 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): admin_tok = self.login("admin", "adminpassword") url = "/_synapse/admin/v1/account_validity/validity" - params = { + request_data = { "user_id": user_id, "expiration_ts": 0, "enable_renewal_emails": False, } - request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) self.assertEqual(channel.result["code"], b"200", channel.result) @@ -870,12 +862,11 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): admin_tok = self.login("admin", "adminpassword") url = "/_synapse/admin/v1/account_validity/validity" - params = { + request_data = { "user_id": user_id, "expiration_ts": 0, "enable_renewal_emails": False, } - request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) self.assertEqual(channel.result["code"], b"200", channel.result) @@ -1041,16 +1032,14 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): (user_id, tok) = self.create_user() - request_data = json.dumps( - { - "auth": { - "type": "m.login.password", - "user": user_id, - "password": "monkey", - }, - "erase": False, - } - ) + request_data = { + "auth": { + "type": 
"m.login.password", + "user": user_id, + "password": "monkey", + }, + "erase": False, + } channel = self.make_request( "POST", "account/deactivate", request_data, access_token=tok ) diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index 20a259fc43..ad0d0209f7 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json - from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -77,10 +75,7 @@ class ReportEventTestCase(unittest.HomeserverTestCase): def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( - "POST", - self.report_path, - json.dumps(data), - access_token=self.other_user_tok, + "POST", self.report_path, data, access_token=self.other_user_tok ) self.assertEqual( response_status, int(channel.result["code"]), msg=channel.result["body"] diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4c6b3decd8..c60ca604e9 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -770,16 +770,14 @@ class RoomsCreateTestCase(RoomBase): # Build the request's content. We use local MXIDs because invites over federation # are more difficult to mock. - content = json.dumps( - { - "invite": [ - "@alice1:red", - "@alice2:red", - "@alice3:red", - "@alice4:red", - ] - } - ).encode("utf8") + content = { + "invite": [ + "@alice1:red", + "@alice2:red", + "@alice3:red", + "@alice4:red", + ] + } # Test that the invites are correctly ratelimited. channel = self.make_request("POST", "/createRoom", content) @@ -2251,8 +2249,7 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): # Set a profile for the test user self.displayname = "test user" - data = {"displayname": self.displayname} - request_data = json.dumps(data) + request_data = {"displayname": self.displayname} channel = self.make_request( "PUT", "/_matrix/client/r0/profile/%s/displayname" % (self.user_id,), @@ -2264,8 +2261,7 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) def test_per_room_profile_forbidden(self) -> None: - data = {"membership": "join", "displayname": "other test user"} - request_data = json.dumps(data) + request_data = {"membership": "join", "displayname": "other test user"} channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" @@ -2605,16 +2601,14 @@ class LabelsTestCase(unittest.HomeserverTestCase): def test_search_filter_labels(self) -> None: """Test that we can filter by a label on a /search request.""" - request_data = json.dumps( - { - "search_categories": { - "room_events": { - "search_term": "label", - "filter": self.FILTER_LABELS, - } + request_data = { + "search_categories": { + "room_events": { + "search_term": "label", + "filter": self.FILTER_LABELS, } } - ) + } self._send_labelled_messages_in_room() @@ -2642,16 +2636,14 @@ class LabelsTestCase(unittest.HomeserverTestCase): def test_search_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /search request.""" - request_data = json.dumps( - { - "search_categories": { - "room_events": { - "search_term": "label", - "filter": self.FILTER_NOT_LABELS, - } + request_data = { + "search_categories": { + "room_events": { + "search_term": "label", + "filter": 
self.FILTER_NOT_LABELS, } } - ) + } self._send_labelled_messages_in_room() @@ -2691,16 +2683,14 @@ class LabelsTestCase(unittest.HomeserverTestCase): """Test that we can filter by both a label and the absence of another label on a /search request. """ - request_data = json.dumps( - { - "search_categories": { - "room_events": { - "search_term": "label", - "filter": self.FILTER_LABELS_NOT_LABELS, - } + request_data = { + "search_categories": { + "room_events": { + "search_term": "label", + "filter": self.FILTER_LABELS_NOT_LABELS, } } - ) + } self._send_labelled_messages_in_room() @@ -3145,8 +3135,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", url, request_data, access_token=self.room_owner_tok @@ -3175,8 +3164,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", url, request_data, access_token=self.room_owner_tok @@ -3202,7 +3190,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): channel = self.make_request( "PUT", "rooms/%s/state/m.room.canonical_alias" % (self.room_id,), - json.dumps(content), + content, access_token=self.room_owner_tok, ) self.assertEqual(channel.code, expected_code, channel.result) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index e3efd1f1b0..b085c50356 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -606,11 +606,10 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self._check_unread_count(1) # Send a read receipt to tell the server we've read the latest event. - body = json.dumps({ReceiptTypes.READ: res["event_id"]}).encode("utf8") channel = self.make_request( "POST", f"/rooms/{self.room_id}/read_markers", - body, + {ReceiptTypes.READ: res["event_id"]}, access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.json_body) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 93f749744d..105d418698 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -136,7 +136,7 @@ class RestHelper: self.site, "POST", path, - json.dumps(content).encode("utf8"), + content, custom_headers=custom_headers, ) @@ -210,7 +210,7 @@ class RestHelper: self.site, "POST", path, - json.dumps(data).encode("utf8"), + data, ) assert ( @@ -309,7 +309,7 @@ class RestHelper: self.site, "PUT", path, - json.dumps(data).encode("utf8"), + data, ) assert ( @@ -392,7 +392,7 @@ class RestHelper: self.site, "PUT", path, - json.dumps(content or {}).encode("utf8"), + content or {}, custom_headers=custom_headers, ) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 37fada5c53..d3c13cf14c 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactorClock @@ -51,7 +50,7 @@ class TermsTestCase(unittest.HomeserverTestCase): def test_ui_auth(self): # Do a UI auth request - request_data = json.dumps({"username": "kermit", "password": "monkey"}) + request_data = {"username": "kermit", "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"401", channel.result) @@ -82,16 +81,14 @@ class TermsTestCase(unittest.HomeserverTestCase): self.assertDictContainsSubset(channel.json_body["params"], expected_params) # We have to complete the dummy auth stage before completing the terms stage - request_data = json.dumps( - { - "username": "kermit", - "password": "monkey", - "auth": { - "session": channel.json_body["session"], - "type": "m.login.dummy", - }, - } - ) + request_data = { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.dummy", + }, + } self.registration_handler.check_username = Mock(return_value=True) @@ -102,16 +99,14 @@ class TermsTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"401", channel.result) # Finish the UI auth for terms - request_data = json.dumps( - { - "username": "kermit", - "password": "monkey", - "auth": { - "session": channel.json_body["session"], - "type": "m.login.terms", - }, - } - ) + request_data = { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.terms", + }, + } channel = self.make_request(b"POST", self.url, request_data) # We're interested in getting a response that looks like a successful diff --git a/tests/unittest.py b/tests/unittest.py index 7b97a4bf6e..9f1ff774a8 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -16,7 +16,6 @@ import gc import hashlib import hmac -import json import logging import secrets import time @@ -619,20 +618,16 @@ class HomeserverTestCase(TestCase): want_mac.update(nonce.encode("ascii") + b"\x00" + nonce_str) want_mac_digest = want_mac.hexdigest() - body = json.dumps( - { - "nonce": nonce, - "username": username, - "displayname": displayname, - "password": password, - "admin": admin, - "mac": want_mac_digest, - "inhibit_login": True, - } - ) - channel = self.make_request( - "POST", "/_synapse/admin/v1/register", body.encode("utf8") - ) + body = { + "nonce": nonce, + "username": username, + "displayname": displayname, + "password": password, + "admin": admin, + "mac": want_mac_digest, + "inhibit_login": True, + } + channel = self.make_request("POST", "/_synapse/admin/v1/register", body) self.assertEqual(channel.code, 200, channel.json_body) user_id = channel.json_body["user_id"] @@ -676,9 +671,7 @@ class HomeserverTestCase(TestCase): custom_headers: Optional[Iterable[CustomHeaderType]] = None, ) -> str: """ - Log in a user, and get an access token. Requires the Login API be - registered. - + Log in a user, and get an access token. Requires the Login API be registered. 
""" body = {"type": "m.login.password", "user": username, "password": password} if device_id: @@ -687,7 +680,7 @@ class HomeserverTestCase(TestCase): channel = self.make_request( "POST", "/_matrix/client/r0/login", - json.dumps(body).encode("utf8"), + body, custom_headers=custom_headers, ) self.assertEqual(channel.code, 200, channel.result) -- cgit 1.4.1 From c6a05063ff26502d126be33dd4ae2854ea88cbab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jul 2022 10:05:30 +0100 Subject: Don't pull out the full state when creating an event (#13281) --- changelog.d/13281.misc | 1 + synapse/events/builder.py | 8 +++++++- synapse/state/__init__.py | 3 ++- 3 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13281.misc diff --git a/changelog.d/13281.misc b/changelog.d/13281.misc new file mode 100644 index 0000000000..dea51d1362 --- /dev/null +++ b/changelog.d/13281.misc @@ -0,0 +1 @@ +Don't pull out the full state when creating an event. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 4caf6cbdee..17f624b68f 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -24,9 +24,11 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.event_auth import auth_types_for_event from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.state import StateHandler from synapse.storage.databases.main import DataStore +from synapse.storage.state import StateFilter from synapse.types import EventID, JsonDict from synapse.util import Clock from synapse.util.stringutils import random_string @@ -121,7 +123,11 @@ class EventBuilder: """ if auth_event_ids is None: state_ids = await self._state.compute_state_after_events( - self.room_id, prev_event_ids + self.room_id, + prev_event_ids, + state_filter=StateFilter.from_types( + auth_types_for_event(self.room_version, self) + ), ) auth_event_ids = self._event_auth_handler.compute_auth_events( self, state_ids diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index fcb7e829d4..e3faa52cd6 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -157,6 +157,7 @@ class StateHandler: self, room_id: str, event_ids: Collection[str], + state_filter: Optional[StateFilter] = None, ) -> StateMap[str]: """Fetch the state after each of the given event IDs. Resolve them and return. @@ -174,7 +175,7 @@ class StateHandler: """ logger.debug("calling resolve_state_groups from compute_state_after_events") ret = await self.resolve_state_groups_for_events(room_id, event_ids) - return await ret.get_state(self._state_storage_controller, StateFilter.all()) + return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_users_in_room( self, room_id: str, latest_event_ids: List[str] -- cgit 1.4.1 From c5f487b7cb7994c1782bf639223940f19b18e4d9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 18 Jul 2022 13:02:25 +0100 Subject: Update expected DB query count when creating a room (#13307) --- changelog.d/13307.misc | 1 + tests/rest/client/test_rooms.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13307.misc diff --git a/changelog.d/13307.misc b/changelog.d/13307.misc new file mode 100644 index 0000000000..45b628ce13 --- /dev/null +++ b/changelog.d/13307.misc @@ -0,0 +1 @@ +Don't pull out the full state when creating an event. 
\ No newline at end of file diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index c60ca604e9..17571b2d33 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -710,7 +710,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(36, channel.resource_usage.db_txn_count) + self.assertEqual(43, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -723,7 +723,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(40, channel.resource_usage.db_txn_count) + self.assertEqual(49, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id -- cgit 1.4.1 From 6785b0f39d8f920a7b91a8a6a043ede08eb277e4 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Mon, 18 Jul 2022 15:17:24 +0200 Subject: Use READ COMMITTED isolation level when purging rooms (#12942) To close: #10294. Signed off by Nick @ Beeper. --- changelog.d/12942.misc | 1 + synapse/storage/databases/main/purge_events.py | 33 ++++++++++++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12942.misc diff --git a/changelog.d/12942.misc b/changelog.d/12942.misc new file mode 100644 index 0000000000..acb2558d57 --- /dev/null +++ b/changelog.d/12942.misc @@ -0,0 +1 @@ +Use lower isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 549ce07c16..6d42276503 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -19,6 +19,8 @@ from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore from synapse.storage.databases.main.state import StateGroupWorkerStore +from synapse.storage.engines import PostgresEngine +from synapse.storage.engines._base import IsolationLevel from synapse.types import RoomStreamToken logger = logging.getLogger(__name__) @@ -317,11 +319,38 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): Returns: The list of state groups to delete. """ - return await self.db_pool.runInteraction( - "purge_room", self._purge_room_txn, room_id + + # This first runs the purge transaction with READ_COMMITTED isolation level, + # meaning any new rows in the tables will not trigger a serialization error. + # We then run the same purge a second time without this isolation level to + # purge any of those rows which were added during the first. 
+ + state_groups_to_delete = await self.db_pool.runInteraction( + "purge_room", + self._purge_room_txn, + room_id=room_id, + isolation_level=IsolationLevel.READ_COMMITTED, + ) + + state_groups_to_delete.extend( + await self.db_pool.runInteraction( + "purge_room", + self._purge_room_txn, + room_id=room_id, + ), ) + return state_groups_to_delete + def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: + # This collides with event persistence so we cannot write new events and metadata into + # a room while deleting it or this transaction will fail. + if isinstance(self.database_engine, PostgresEngine): + txn.execute( + "SELECT room_version FROM rooms WHERE room_id = ? FOR UPDATE", + (room_id,), + ) + # First, fetch all the state groups that should be deleted, before # we delete that information. txn.execute( -- cgit 1.4.1 From cf5fa5063d57e9fde96d666e9152bdda520431b8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jul 2022 14:19:11 +0100 Subject: Don't pull out full state when sending dummy events (#13310) --- changelog.d/13310.misc | 1 + synapse/handlers/message.py | 8 +------- 2 files changed, 2 insertions(+), 7 deletions(-) create mode 100644 changelog.d/13310.misc diff --git a/changelog.d/13310.misc b/changelog.d/13310.misc new file mode 100644 index 0000000000..eaf570e058 --- /dev/null +++ b/changelog.d/13310.misc @@ -0,0 +1 @@ +Reduce memory usage of sending dummy events. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b5fede9496..85abe71ea8 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1849,13 +1849,8 @@ class EventCreationHandler: # For each room we need to find a joined member we can use to send # the dummy event with. - latest_event_ids = await self.store.get_prev_events_for_room(room_id) - members = await self.state.get_current_users_in_room( - room_id, latest_event_ids=latest_event_ids - ) + members = await self.store.get_local_users_in_room(room_id) for user_id in members: - if not self.hs.is_mine_id(user_id): - continue requester = create_requester(user_id, authenticated_entity=self.server_name) try: event, context = await self.create_event( @@ -1866,7 +1861,6 @@ class EventCreationHandler: "room_id": room_id, "sender": user_id, }, - prev_event_ids=latest_event_ids, ) event.internal_metadata.proactively_send = False -- cgit 1.4.1 From f721f1baba9cdefc0bff540c3b93710b36eecee9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Jul 2022 14:28:14 +0100 Subject: Revert "Make all `process_replication_rows` methods async (#13304)" (#13312) This reverts commit 5d4028f217f178fcd384d5bfddd92225b4e78c51. 
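For context on the revert below, here is a minimal illustrative sketch (hypothetical `_Example*` class names, not Synapse's real store mixins) of the synchronous callback shape being restored: each store handles the streams it cares about and then chains up via `super()`, with nothing awaited anywhere in the chain.

```python
from typing import Any, Iterable


class _ExampleBaseStore:
    # Stand-in for the base store class; real Synapse stores differ.
    def process_replication_rows(
        self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
    ) -> None:
        """Advance stream positions / invalidate caches for replicated rows."""


class _ExamplePusherStore(_ExampleBaseStore):
    def process_replication_rows(
        self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
    ) -> None:
        if stream_name == "pushers":
            pass  # e.g. advance this store's ID generator to `token`
        # Chain up so other mixins in the MRO also see the rows; the whole
        # chain stays synchronous, which is what this revert restores.
        return super().process_replication_rows(
            stream_name, instance_name, token, rows
        )
```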
--- changelog.d/13304.misc | 1 - synapse/handlers/typing.py | 4 ++-- synapse/replication/slave/storage/devices.py | 6 ++---- synapse/replication/slave/storage/push_rule.py | 6 ++---- synapse/replication/slave/storage/pushers.py | 6 ++---- synapse/replication/tcp/client.py | 6 ++---- synapse/storage/_base.py | 2 +- synapse/storage/databases/main/account_data.py | 4 ++-- synapse/storage/databases/main/cache.py | 4 ++-- synapse/storage/databases/main/deviceinbox.py | 6 ++---- synapse/storage/databases/main/events_worker.py | 4 ++-- synapse/storage/databases/main/presence.py | 6 ++---- synapse/storage/databases/main/receipts.py | 6 ++---- synapse/storage/databases/main/tags.py | 4 ++-- 14 files changed, 25 insertions(+), 40 deletions(-) delete mode 100644 changelog.d/13304.misc diff --git a/changelog.d/13304.misc b/changelog.d/13304.misc deleted file mode 100644 index 156d3d71d7..0000000000 --- a/changelog.d/13304.misc +++ /dev/null @@ -1 +0,0 @@ -Make all replication row processing methods asynchronous. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 26edeeca3c..d104ea07fe 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -158,7 +158,7 @@ class FollowerTypingHandler: except Exception: logger.exception("Error pushing typing notif to remotes") - async def process_replication_rows( + def process_replication_rows( self, token: int, rows: List[TypingStream.TypingStreamRow] ) -> None: """Should be called whenever we receive updates for typing stream.""" @@ -444,7 +444,7 @@ class TypingWriterHandler(FollowerTypingHandler): return rows, current_id, limited - async def process_replication_rows( + def process_replication_rows( self, token: int, rows: List[TypingStream.TypingStreamRow] ) -> None: # The writing process should never get updates from replication. 
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 22f7999721..a48cc02069 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -49,7 +49,7 @@ class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == DeviceListsStream.NAME: @@ -59,9 +59,7 @@ class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): self._device_list_id_gen.advance(instance_name, token) for row in rows: self._user_signature_stream_cache.entity_has_changed(row.user_id, token) - return await super().process_replication_rows( - stream_name, instance_name, token, rows - ) + return super().process_replication_rows(stream_name, instance_name, token, rows) def _invalidate_caches_for_devices( self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow] diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index e1838a81a9..52ee3f7e58 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -24,7 +24,7 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): def get_max_push_rules_stream_id(self) -> int: return self._push_rules_stream_id_gen.get_current_token() - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == PushRulesStream.NAME: @@ -33,6 +33,4 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): self.get_push_rules_for_user.invalidate((row.user_id,)) self.get_push_rules_enabled_for_user.invalidate((row.user_id,)) self.push_rules_stream_cache.entity_has_changed(row.user_id, token) - return await super().process_replication_rows( - stream_name, instance_name, token, rows - ) + return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index fb3f5653af..de642bba71 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -40,11 +40,9 @@ class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): def get_pushers_stream_token(self) -> int: return self._pushers_id_gen.get_current_token() - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == PushersStream.NAME: self._pushers_id_gen.advance(instance_name, token) - return await super().process_replication_rows( - stream_name, instance_name, token, rows - ) + return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index f9722ccb4f..2f59245058 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -144,15 +144,13 @@ class ReplicationDataHandler: token: stream token for this batch of rows rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row. 
""" - await self.store.process_replication_rows( - stream_name, instance_name, token, rows - ) + self.store.process_replication_rows(stream_name, instance_name, token, rows) if self.send_handler: await self.send_handler.process_replication_rows(stream_name, token, rows) if stream_name == TypingStream.NAME: - await self._typing_handler.process_replication_rows(token, rows) + self._typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( StreamKeyType.TYPING, token, rooms=[row.room_id for row in rows] ) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 822108e83b..b8c8dcd76b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -47,7 +47,7 @@ class SQLBaseStore(metaclass=ABCMeta): self.database_engine = database.engine self.db_pool = database - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 337b22294e..9af9f4f18e 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -414,7 +414,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) ) - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, @@ -437,7 +437,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) self._account_data_stream_cache.entity_has_changed(row.user_id, token) - await super().process_replication_rows(stream_name, instance_name, token, rows) + super().process_replication_rows(stream_name, instance_name, token, rows) async def add_account_data_to_room( self, user_id: str, room_id: str, account_data_type: str, content: JsonDict diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 048ff3e1b7..2367ddeea3 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -119,7 +119,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_all_updated_caches", get_all_updated_caches_txn ) - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == EventsStream.NAME: @@ -154,7 +154,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): else: self._attempt_to_invalidate_cache(row.cache_func, row.keys) - await super().process_replication_rows(stream_name, instance_name, token, rows) + super().process_replication_rows(stream_name, instance_name, token, rows) def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None: data = row.data diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 45fe58c104..422e0e65ca 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -128,7 +128,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): prefilled_cache=device_outbox_prefill, ) - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, @@ -148,9 +148,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): self._device_federation_outbox_stream_cache.entity_has_changed( row.entity, token ) - return await super().process_replication_rows( - stream_name, instance_name, token, rows - ) + return super().process_replication_rows(stream_name, instance_name, 
token, rows) def get_to_device_stream_token(self) -> int: return self._device_inbox_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 5310d4eda2..f3935bfead 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -280,7 +280,7 @@ class EventsWorkerStore(SQLBaseStore): id_column="chain_id", ) - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, @@ -292,7 +292,7 @@ class EventsWorkerStore(SQLBaseStore): elif stream_name == BackfillStream.NAME: self._backfill_id_gen.advance(instance_name, -token) - await super().process_replication_rows(stream_name, instance_name, token, rows) + super().process_replication_rows(stream_name, instance_name, token, rows) async def have_censored_event(self, event_id: str) -> bool: """Check if an event has been censored, i.e. if the content of the event has been erased diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 9fe3124b35..9769a18a9d 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -431,7 +431,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) self._presence_on_startup = [] return active_on_startup - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, @@ -443,6 +443,4 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) for row in rows: self.presence_stream_cache.entity_has_changed(row.user_id, token) self._get_presence_for_user.invalidate((row.user_id,)) - return await super().process_replication_rows( - stream_name, instance_name, token, rows - ) + return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index f85862d968..0090c9f225 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -589,7 +589,7 @@ class ReceiptsWorkerStore(SQLBaseStore): "get_unread_event_push_actions_by_room_for_user", (room_id,) ) - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, @@ -604,9 +604,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ) self._receipts_stream_cache.entity_has_changed(row.room_id, token) - return await super().process_replication_rows( - stream_name, instance_name, token, rows - ) + return super().process_replication_rows(stream_name, instance_name, token, rows) def _insert_linearized_receipt_txn( self, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 5e8905369c..b0f5de67a3 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -292,7 +292,7 @@ class TagsWorkerStore(AccountDataWorkerStore): # than the id that the client has. 
pass - async def process_replication_rows( + def process_replication_rows( self, stream_name: str, instance_name: str, @@ -305,7 +305,7 @@ class TagsWorkerStore(AccountDataWorkerStore): self.get_tags_for_user.invalidate((row.user_id,)) self._account_data_stream_cache.entity_has_changed(row.user_id, token) - await super().process_replication_rows(stream_name, instance_name, token, rows) + super().process_replication_rows(stream_name, instance_name, token, rows) class TagsStore(TagsWorkerStore): -- cgit 1.4.1 From bb25dd81e3d909299092fce7adda05cc6f35dee2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 18 Jul 2022 15:02:32 +0100 Subject: Prevent #3679 from appearing in blame results (#13311) --- .git-blame-ignore-revs | 13 +++++++++++++ changelog.d/13311.misc | 1 + 2 files changed, 14 insertions(+) create mode 100644 changelog.d/13311.misc diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 50d28c68ee..c3638c35eb 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,3 +1,16 @@ +# Commits in this file will be removed from GitHub blame results. +# +# To use this file locally, use: +# git blame --ignore-revs-file="path/to/.git-blame-ignore-revs" +# +# or configure the `blame.ignoreRevsFile` option in your git config. +# +# If ignoring a pull request that was not squash merged, only the merge +# commit needs to be put here. Child commits will be resolved from it. + +# Run black (#3679). +8b3d9b6b199abb87246f982d5db356f1966db925 + # Black reformatting (#5482). 32e7c9e7f20b57dd081023ac42d6931a8da9b3a3 diff --git a/changelog.d/13311.misc b/changelog.d/13311.misc new file mode 100644 index 0000000000..4be81c675c --- /dev/null +++ b/changelog.d/13311.misc @@ -0,0 +1 @@ +Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. \ No newline at end of file -- cgit 1.4.1 From 8c60c572f011a502040b509ae648fd5cad3d4428 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 18 Jul 2022 17:30:59 +0200 Subject: Up the dependency on canonicaljson to ^1.5.0 (#13172) Co-authored-by: David Robertson --- changelog.d/13172.misc | 1 + poetry.lock | 2 +- pyproject.toml | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13172.misc diff --git a/changelog.d/13172.misc b/changelog.d/13172.misc new file mode 100644 index 0000000000..124a1b3662 --- /dev/null +++ b/changelog.d/13172.misc @@ -0,0 +1 @@ +Always use a version of canonicaljson that supports the C implementation of frozendict. diff --git a/poetry.lock b/poetry.lock index 3a08c9478d..41ab40edd1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "e96625923122e29b6ea5964379828e321b6cede2b020fc32c6f86c09d86d1ae8" +content-hash = "c24bbcee7e86dbbe7cdbf49f91a25b310bf21095452641e7440129f59b077f78" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index f77c02ca27..21bea2ba01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -110,7 +110,9 @@ jsonschema = ">=3.0.0" frozendict = ">=1,!=2.1.2" # We require 2.1.0 or higher for type hints. 
Previous guard was >= 1.1.0 unpaddedbase64 = ">=2.1.0" -canonicaljson = "^1.4.0" +# We require 1.5.0 to work around an issue when running against the C implementation of +# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36 +canonicaljson = "^1.5.0" # we use the type definitions added in signedjson 1.1. signedjson = "^1.1.0" # validating SSL certs for IP addresses requires service_identity 18.1. -- cgit 1.4.1 From 5526f9fc4f53c9e0eac6aa610bf0a76906b772dc Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 18 Jul 2022 17:39:39 +0100 Subject: Fix overcounting of pushers when they are replaced (#13296) Signed-off-by: Sean Quah --- changelog.d/13296.bugfix | 1 + synapse/push/pusherpool.py | 27 ++++++++++++++++----------- 2 files changed, 17 insertions(+), 11 deletions(-) create mode 100644 changelog.d/13296.bugfix diff --git a/changelog.d/13296.bugfix b/changelog.d/13296.bugfix new file mode 100644 index 0000000000..ff0eb2b4a1 --- /dev/null +++ b/changelog.d/13296.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d0cc657b44..1e0ef44fc7 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -328,7 +328,7 @@ class PusherPool: return None try: - p = self.pusher_factory.create_pusher(pusher_config) + pusher = self.pusher_factory.create_pusher(pusher_config) except PusherConfigException as e: logger.warning( "Pusher incorrectly configured id=%i, user=%s, appid=%s, pushkey=%s: %s", @@ -346,23 +346,28 @@ class PusherPool: ) return None - if not p: + if not pusher: return None - appid_pushkey = "%s:%s" % (pusher_config.app_id, pusher_config.pushkey) + appid_pushkey = "%s:%s" % (pusher.app_id, pusher.pushkey) - byuser = self.pushers.setdefault(pusher_config.user_name, {}) + byuser = self.pushers.setdefault(pusher.user_id, {}) if appid_pushkey in byuser: - byuser[appid_pushkey].on_stop() - byuser[appid_pushkey] = p + previous_pusher = byuser[appid_pushkey] + previous_pusher.on_stop() - synapse_pushers.labels(type(p).__name__, p.app_id).inc() + synapse_pushers.labels( + type(previous_pusher).__name__, previous_pusher.app_id + ).dec() + byuser[appid_pushkey] = pusher + + synapse_pushers.labels(type(pusher).__name__, pusher.app_id).inc() # Check if there *may* be push to process. We do this as this check is a # lot cheaper to do than actually fetching the exact rows we need to # push. - user_id = pusher_config.user_name - last_stream_ordering = pusher_config.last_stream_ordering + user_id = pusher.user_id + last_stream_ordering = pusher.last_stream_ordering if last_stream_ordering: have_notifs = await self.store.get_if_maybe_push_in_range_for_user( user_id, last_stream_ordering @@ -372,9 +377,9 @@ class PusherPool: # risk missing push. 
have_notifs = True - p.on_started(have_notifs) + pusher.on_started(have_notifs) - return p + return pusher async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None: appid_pushkey = "%s:%s" % (app_id, pushkey) -- cgit 1.4.1 From 15edf23626a55ee38a8ee394bfcc8eaeff733c37 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 18 Jul 2022 12:35:45 -0700 Subject: Improve performance of query `_get_subset_users_in_room_with_profiles` (#13299) --- changelog.d/13299.misc | 1 + synapse/storage/databases/main/roommember.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13299.misc diff --git a/changelog.d/13299.misc b/changelog.d/13299.misc new file mode 100644 index 0000000000..a9d5566873 --- /dev/null +++ b/changelog.d/13299.misc @@ -0,0 +1 @@ +Improve performance of query `_get_subset_users_in_room_with_profiles`. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 105a518677..46ab6232d4 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -243,7 +243,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): txn: LoggingTransaction, ) -> Dict[str, ProfileInfo]: clause, ids = make_in_list_sql_clause( - self.database_engine, "m.user_id", user_ids + self.database_engine, "c.state_key", user_ids ) sql = """ -- cgit 1.4.1 From 7864f33e286dec22368dc0b11c06eebb1462a51e Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 18 Jul 2022 13:15:23 -0700 Subject: Increase batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. (#13300) --- changelog.d/13300.misc | 1 + synapse/storage/databases/main/push_rule.py | 1 + synapse/storage/databases/main/roommember.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13300.misc diff --git a/changelog.d/13300.misc b/changelog.d/13300.misc new file mode 100644 index 0000000000..ee58add3c4 --- /dev/null +++ b/changelog.d/13300.misc @@ -0,0 +1 @@ +Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 86649c1e6c..768f95d16c 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -228,6 +228,7 @@ class PushRulesWorkerStore( iterable=user_ids, retcols=("*",), desc="bulk_get_push_rules", + batch_size=1000, ) rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 46ab6232d4..df6b82660e 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -904,7 +904,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): iterable=event_ids, retcols=("user_id", "display_name", "avatar_url", "event_id"), keyvalues={"membership": Membership.JOIN}, - batch_size=500, + batch_size=1000, desc="_get_joined_profiles_from_event_ids", ) -- cgit 1.4.1 From 2ee0b6ef4b78bada535beb30301cf0e01cbb7d81 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 19 Jul 2022 13:25:29 +0200 Subject: Safe async event cache (#13308) Fix race conditions in the async cache invalidation logic, by separating the async & local invalidation calls and ensuring any async call is executed first. Signed off by Nick @ Beeper (@Fizzadar).
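The race being fixed: if the local in-memory cache is invalidated before the remote (e.g. Redis-backed) one, a concurrent reader can miss locally, hit the still-stale remote entry, and repopulate the local cache with the old value. A toy model of the corrected ordering (illustrative only, not Synapse's actual cache classes):

```python
import asyncio
from typing import Dict, Optional


class _ToyTwoTierCache:
    """A local dict fronting a remote async cache, to show why ordering matters."""

    def __init__(self) -> None:
        self._local: Dict[str, str] = {}
        self._remote: Dict[str, str] = {}  # stand-in for an external cache

    async def invalidate(self, key: str) -> None:
        # 1) Remote/async invalidation first (async_call_after in this patch).
        await asyncio.sleep(0)  # pretend network round-trip
        self._remote.pop(key, None)
        # 2) Local invalidation second (call_after in this patch). Once the
        #    remote copy is gone, a racing reader can at worst refill the local
        #    cache from the database, never from a stale remote value.
        self._local.pop(key, None)

    async def get(self, key: str) -> Optional[str]:
        if key in self._local:
            return self._local[key]
        value = self._remote.get(key)
        if value is not None:
            self._local[key] = value  # the refill that makes the ordering matter
        return value
```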
--- changelog.d/13308.misc | 1 + synapse/storage/_base.py | 9 +++- synapse/storage/database.py | 54 +++++++++++++++++++--- synapse/storage/databases/main/censor_events.py | 2 +- synapse/storage/databases/main/events.py | 6 +-- synapse/storage/databases/main/events_worker.py | 48 +++++++++++++++---- .../storage/databases/main/monthly_active_users.py | 1 + synapse/storage/databases/main/purge_events.py | 2 +- 8 files changed, 102 insertions(+), 21 deletions(-) create mode 100644 changelog.d/13308.misc diff --git a/changelog.d/13308.misc b/changelog.d/13308.misc new file mode 100644 index 0000000000..7f8ec0815f --- /dev/null +++ b/changelog.d/13308.misc @@ -0,0 +1 @@ +Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b8c8dcd76b..a2f8310388 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -96,6 +96,10 @@ class SQLBaseStore(metaclass=ABCMeta): cache doesn't exist. Mainly used for invalidating caches on workers, where they may not have the cache. + Note that this function does not invalidate any remote caches, only the + local in-memory ones. Any remote invalidation must be performed before + calling this. + Args: cache_name key: Entry to invalidate. If None then invalidates the entire @@ -112,7 +116,10 @@ class SQLBaseStore(metaclass=ABCMeta): if key is None: cache.invalidate_all() else: - cache.invalidate(tuple(key)) + # Prefer any local-only invalidation method. Invalidating any non-local + # cache must be done before this. + invalidate_method = getattr(cache, "invalidate_local", cache.invalidate) + invalidate_method(tuple(key)) def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 6a6d0dcd73..ea672ff89e 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -23,6 +23,7 @@ from time import monotonic as monotonic_time from typing import ( TYPE_CHECKING, Any, + Awaitable, Callable, Collection, Dict, @@ -57,7 +58,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor -from synapse.util.async_helpers import delay_cancellation, maybe_awaitable +from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -168,6 +169,7 @@ class LoggingDatabaseConnection: *, txn_name: Optional[str] = None, after_callbacks: Optional[List["_CallbackListEntry"]] = None, + async_after_callbacks: Optional[List["_AsyncCallbackListEntry"]] = None, exception_callbacks: Optional[List["_CallbackListEntry"]] = None, ) -> "LoggingTransaction": if not txn_name: @@ -178,6 +180,7 @@ class LoggingDatabaseConnection: name=txn_name, database_engine=self.engine, after_callbacks=after_callbacks, + async_after_callbacks=async_after_callbacks, exception_callbacks=exception_callbacks, ) @@ -209,6 +212,9 @@ class LoggingDatabaseConnection: # The type of entry which goes on our after_callbacks and exception_callbacks lists.
_CallbackListEntry = Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]] +_AsyncCallbackListEntry = Tuple[ + Callable[..., Awaitable], Tuple[object, ...], Dict[str, object] +] P = ParamSpec("P") R = TypeVar("R") @@ -227,6 +233,10 @@ class LoggingTransaction: that have been added by `call_after` which should be run on successful completion of the transaction. None indicates that no callbacks should be allowed to be scheduled to run. + async_after_callbacks: A list that asynchronous callbacks will be appended + to by `async_call_after` which should run, before after_callbacks, on + successful completion of the transaction. None indicates that no + callbacks should be allowed to be scheduled to run. exception_callbacks: A list that callbacks will be appended to that have been added by `call_on_exception` which should be run if transaction ends with an error. None indicates that no callbacks @@ -238,6 +248,7 @@ class LoggingTransaction: "name", "database_engine", "after_callbacks", + "async_after_callbacks", "exception_callbacks", ] @@ -247,12 +258,14 @@ class LoggingTransaction: name: str, database_engine: BaseDatabaseEngine, after_callbacks: Optional[List[_CallbackListEntry]] = None, + async_after_callbacks: Optional[List[_AsyncCallbackListEntry]] = None, exception_callbacks: Optional[List[_CallbackListEntry]] = None, ): self.txn = txn self.name = name self.database_engine = database_engine self.after_callbacks = after_callbacks + self.async_after_callbacks = async_after_callbacks self.exception_callbacks = exception_callbacks def call_after( @@ -277,6 +290,28 @@ class LoggingTransaction: # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 self.after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + def async_call_after( + self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs + ) -> None: + """Call the given asynchronous callback on the main twisted thread after + the transaction has finished (but before those added in `call_after`). + + Mostly used to invalidate remote caches after transactions. + + Note that transactions may be retried a few times if they encounter database + errors such as serialization failures. Callbacks given to `async_call_after` + will accumulate across transaction attempts and will _all_ be called once a + transaction attempt succeeds, regardless of whether previous transaction + attempts failed. Otherwise, if all transaction attempts fail, all + `call_on_exception` callbacks will be run instead. + """ + # if self.async_after_callbacks is None, that means that whatever constructed the + # LoggingTransaction isn't expecting there to be any callbacks; assert that + # is not the case. 
+ assert self.async_after_callbacks is not None + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.async_after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + def call_on_exception( self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs ) -> None: @@ -574,6 +609,7 @@ class DatabasePool: conn: LoggingDatabaseConnection, desc: str, after_callbacks: List[_CallbackListEntry], + async_after_callbacks: List[_AsyncCallbackListEntry], exception_callbacks: List[_CallbackListEntry], func: Callable[Concatenate[LoggingTransaction, P], R], *args: P.args, @@ -597,6 +633,7 @@ class DatabasePool: conn desc after_callbacks + async_after_callbacks exception_callbacks func *args @@ -659,6 +696,7 @@ class DatabasePool: cursor = conn.cursor( txn_name=name, after_callbacks=after_callbacks, + async_after_callbacks=async_after_callbacks, exception_callbacks=exception_callbacks, ) try: @@ -798,6 +836,7 @@ class DatabasePool: async def _runInteraction() -> R: after_callbacks: List[_CallbackListEntry] = [] + async_after_callbacks: List[_AsyncCallbackListEntry] = [] exception_callbacks: List[_CallbackListEntry] = [] if not current_context(): @@ -809,6 +848,7 @@ class DatabasePool: self.new_transaction, desc, after_callbacks, + async_after_callbacks, exception_callbacks, func, *args, @@ -817,15 +857,17 @@ class DatabasePool: **kwargs, ) + # We order these assuming that async functions call out to external + # systems (e.g. to invalidate a cache) and the sync functions make these + # changes on any local in-memory caches/similar, and thus must be second. + for async_callback, async_args, async_kwargs in async_after_callbacks: + await async_callback(*async_args, **async_kwargs) for after_callback, after_args, after_kwargs in after_callbacks: - await maybe_awaitable(after_callback(*after_args, **after_kwargs)) - + after_callback(*after_args, **after_kwargs) return cast(R, result) except Exception: for exception_callback, after_args, after_kwargs in exception_callbacks: - await maybe_awaitable( - exception_callback(*after_args, **after_kwargs) - ) + exception_callback(*after_args, **after_kwargs) raise # To handle cancellation, we ensure that `after_callback`s and diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index fd3fc298b3..58177ecec1 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -194,7 +194,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase # changed its content in the database. We can't call # self._invalidate_cache_and_stream because self.get_event_cache isn't of the # right type. - txn.call_after(self._get_event_cache.invalidate, (event.event_id,)) + self.invalidate_get_event_cache_after_txn(txn, event.event_id) # Send that invalidation to replication so that other workers also invalidate # the event cache. 
self._send_invalidation_to_replication( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index fa2266ba20..156e1bd5ab 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1293,7 +1293,7 @@ class PersistEventsStore: depth_updates: Dict[str, int] = {} for event, context in events_and_contexts: # Remove any existing cache entries for the event_ids - txn.call_after(self.store._invalidate_get_event_cache, event.event_id) + self.store.invalidate_get_event_cache_after_txn(txn, event.event_id) # Then update the `stream_ordering` position to mark the latest # event as the front of the room. This should not be done for # backfilled events because backfilled events have negative @@ -1675,7 +1675,7 @@ class PersistEventsStore: (cache_entry.event.event_id,), cache_entry ) - txn.call_after(prefill) + txn.async_call_after(prefill) def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: """Invalidate the caches for the redacted event. @@ -1684,7 +1684,7 @@ class PersistEventsStore: _invalidate_caches_for_event. """ assert event.redacts is not None - txn.call_after(self.store._invalidate_get_event_cache, event.redacts) + self.store.invalidate_get_event_cache_after_txn(txn, event.redacts) txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,)) txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,)) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index f3935bfead..4435373146 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -712,17 +712,41 @@ class EventsWorkerStore(SQLBaseStore): return event_entry_map - async def _invalidate_get_event_cache(self, event_id: str) -> None: - # First we invalidate the asynchronous cache instance. This may include - # out-of-process caches such as Redis/memcache. Once complete we can - # invalidate any in memory cache. The ordering is important here to - # ensure we don't pull in any remote invalid value after we invalidate - # the in-memory cache. + def invalidate_get_event_cache_after_txn( + self, txn: LoggingTransaction, event_id: str + ) -> None: + """ + Prepares a database transaction to invalidate the get event cache for a given + event ID when executed successfully. This is achieved by attaching two callbacks + to the transaction, one to invalidate the async cache and one for the in memory + sync cache (importantly called in that order). + + Arguments: + txn: the database transaction to attach the callbacks to + event_id: the event ID to be invalidated from caches + """ + + txn.async_call_after(self._invalidate_async_get_event_cache, event_id) + txn.call_after(self._invalidate_local_get_event_cache, event_id) + + async def _invalidate_async_get_event_cache(self, event_id: str) -> None: + """ + Invalidates an event in the asynchronous get event cache, which may be remote. + + Arguments: + event_id: the event ID to invalidate + """ + + await self._get_event_cache.invalidate((event_id,)) - self._event_ref.pop(event_id, None) - self._current_event_fetches.pop(event_id, None) def _invalidate_local_get_event_cache(self, event_id: str) -> None: + """ + Invalidates an event in local in-memory get event caches.
+ + Arguments: + event_id: the event ID to invalidate + """ + self._get_event_cache.invalidate_local((event_id,)) self._event_ref.pop(event_id, None) self._current_event_fetches.pop(event_id, None) @@ -958,7 +982,13 @@ class EventsWorkerStore(SQLBaseStore): } row_dict = self.db_pool.new_transaction( - conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch + conn, + "do_fetch", + [], + [], + [], + self._fetch_event_rows, + events_to_fetch, ) # We only want to resolve deferreds from the main thread diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 9a63f953fb..efd136a864 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -66,6 +66,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): "initialise_mau_threepids", [], [], + [], self._initialise_reserved_users, hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value], ) diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 6d42276503..f6822707e4 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -304,7 +304,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): self._invalidate_cache_and_stream( txn, self.have_seen_event, (room_id, event_id) ) - txn.call_after(self._invalidate_get_event_cache, event_id) + self.invalidate_get_event_cache_after_txn(txn, event_id) logger.info("[purge] done") -- cgit 1.4.1 From b9778673587941277e15b067ad39cdf084f7dde5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 19 Jul 2022 12:45:17 +0100 Subject: Rate limit joins per-room (#13276) --- changelog.d/13276.feature | 1 + .../complement/conf/workers-shared-extra.yaml.j2 | 4 + docs/upgrade.md | 10 + docs/usage/configuration/config_documentation.md | 19 ++ synapse/config/ratelimiting.py | 7 + synapse/federation/federation_server.py | 16 ++ synapse/handlers/federation_event.py | 4 + synapse/handlers/message.py | 11 + synapse/handlers/room_member.py | 37 +++ synapse/replication/tcp/client.py | 17 +- synapse/replication/tcp/streams/events.py | 1 + synapse/storage/databases/main/events_worker.py | 22 +- tests/federation/test_federation_server.py | 63 ++++- tests/handlers/test_room_member.py | 290 +++++++++++++++++++++ tests/rest/client/test_rooms.py | 4 +- tests/test_server.py | 2 +- tests/unittest.py | 4 +- tests/utils.py | 1 + 18 files changed, 498 insertions(+), 15 deletions(-) create mode 100644 changelog.d/13276.feature create mode 100644 tests/handlers/test_room_member.py diff --git a/changelog.d/13276.feature b/changelog.d/13276.feature new file mode 100644 index 0000000000..068d158ed5 --- /dev/null +++ b/changelog.d/13276.feature @@ -0,0 +1 @@ +Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large.
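The entry above describes token-bucket style limiting: with the defaults introduced below (`per_second: 1`, `burst_count: 10`), a room can absorb up to ten joins at once, after which capacity refills at one join per second. A self-contained sketch of those semantics (illustrative only; Synapse's `Ratelimiter` is async and store-backed):

```python
import time


class _ToyTokenBucket:
    """Token bucket with per_second / burst_count knobs, as in rc_joins_per_room."""

    def __init__(self, per_second: float, burst_count: float) -> None:
        self._rate = per_second
        self._burst = burst_count
        self._tokens = burst_count  # start full: the initial burst allowance
        self._last = time.monotonic()

    def can_do_action(self, update: bool = True) -> bool:
        now = time.monotonic()
        # Refill at `per_second` tokens/sec, capped at `burst_count`.
        self._tokens = min(self._burst, self._tokens + (now - self._last) * self._rate)
        self._last = now
        if self._tokens < 1:
            return False  # over the limit: this join would be throttled
        if update:  # update=False checks the limit without consuming a token
            self._tokens -= 1
        return True
```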
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index b5f675bc73..9e554a865e 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -67,6 +67,10 @@ rc_joins: per_second: 9999 burst_count: 9999 +rc_joins_per_room: + per_second: 9999 + burst_count: 9999 + rc_3pid_validation: per_second: 1000 burst_count: 1000 diff --git a/docs/upgrade.md b/docs/upgrade.md index 3aaeb499ce..2c7c258909 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -104,6 +104,16 @@ minimum, a `notif_from` setting.) Specifying an `email` setting under `account_threepid_delegates` will now cause an error at startup. +## Changes to the event replication streams + +Synapse now includes a flag indicating if an event is an outlier when +replicating it to other workers. This is a forwards- and backwards-incompatible +change: v1.63 workers cannot process events replicated by v1.64 workers, and +vice versa. + +Once all workers are upgraded to v1.64 (or downgraded to v1.63), event +replication will resume as normal. + # Upgrading to v1.62.0 ## New signatures for spam checker callbacks diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 5fe502e33a..be272a400c 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1471,6 +1471,25 @@ rc_joins: per_second: 0.03 burst_count: 12 ``` +--- +### `rc_joins_per_room` + +This option allows admins to ratelimit joins to a room based on the number of recent +joins (local or remote) to that room. It is intended to mitigate mass-join spam +waves which target multiple homeservers. + +By default, one join is permitted to a room every second, with an accumulating +buffer of up to ten instantaneous joins. + +Example configuration (default values): +```yaml +rc_joins_per_room: + per_second: 1 + burst_count: 10 +``` + +_Added in Synapse 1.64.0._ + --- ### `rc_3pid_validation` diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 4fc1784efe..5a91917b4a 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -112,6 +112,13 @@ class RatelimitConfig(Config): defaults={"per_second": 0.01, "burst_count": 10}, ) + # Track the rate of joins to a given room. If there are too many, temporarily + # prevent local joins and remote joins via this server. + self.rc_joins_per_room = RateLimitConfig( + config.get("rc_joins_per_room", {}), + defaults={"per_second": 1, "burst_count": 10}, + ) + # Ratelimit cross-user key requests: # * For local requests this is keyed by the sending device. # * For requests received over federation this is keyed by the origin.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 5dfdc86740..ae550d3f4d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -118,6 +118,7 @@ class FederationServer(FederationBase): self._federation_event_handler = hs.get_federation_event_handler() self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() + self._room_member_handler = hs.get_room_member_handler() self._state_storage_controller = hs.get_storage_controllers().state @@ -621,6 +622,15 @@ class FederationServer(FederationBase): ) raise IncompatibleRoomVersionError(room_version=room_version) + # Refuse the request if that room has seen too many joins recently. + # This is in addition to the HS-level rate limiting applied by + # BaseFederationServlet. + # type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?) + await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type] + requester=None, + key=room_id, + update=False, + ) pdu = await self.handler.on_make_join_request(origin, room_id, user_id) return {"event": pdu.get_templated_pdu_json(), "room_version": room_version} @@ -655,6 +665,12 @@ class FederationServer(FederationBase): room_id: str, caller_supports_partial_state: bool = False, ) -> Dict[str, Any]: + await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type] + requester=None, + key=room_id, + update=False, + ) + event, context = await self._on_send_membership_event( origin, content, Membership.JOIN, room_id ) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index b1dab57447..766d9849f5 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1980,6 +1980,10 @@ class FederationEventHandler: event, event_pos, max_stream_token, extra_users=extra_users ) + if event.type == EventTypes.Member and event.membership == Membership.JOIN: + # TODO retrieve the previous state, and exclude join -> join transitions + self._notifier.notify_user_joined_room(event.event_id, event.room_id) + def _sanity_check_event(self, ev: EventBase) -> None: """ Do some early sanity checks of a received event diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 85abe71ea8..bd7baef051 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -463,6 +463,7 @@ class EventCreationHandler: ) self._events_shard_config = self.config.worker.events_shard_config self._instance_name = hs.get_instance_name() + self._notifier = hs.get_notifier() self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state @@ -1550,6 +1551,16 @@ class EventCreationHandler: requester, is_admin_redaction=is_admin_redaction ) + if event.type == EventTypes.Member and event.membership == Membership.JOIN: + ( + current_membership, + _, + ) = await self.store.get_local_current_membership_for_user_in_room( + event.state_key, event.room_id + ) + if current_membership != Membership.JOIN: + self._notifier.notify_user_joined_room(event.event_id, event.room_id) + await self._maybe_kick_guest_users(event, context) if event.type == EventTypes.CanonicalAlias: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index a5b9ac904e..30b4cb23df 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -94,12 +94,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): 
rate_hz=hs.config.ratelimiting.rc_joins_local.per_second, burst_count=hs.config.ratelimiting.rc_joins_local.burst_count, ) + # Tracks joins from local users to rooms this server isn't a member of. + # I.e. joins this server makes by requesting /make_join /send_join from + # another server. self._join_rate_limiter_remote = Ratelimiter( store=self.store, clock=self.clock, rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second, burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count, ) + # TODO: find a better place to keep this Ratelimiter. + # It needs to be + # - written to by event persistence code + # - written to by something which can snoop on replication streams + # - read by the RoomMemberHandler to rate limit joins from local users + # - read by the FederationServer to rate limit make_joins and send_joins from + # other homeservers + # I wonder if a homeserver-wide collection of rate limiters might be cleaner? + self._join_rate_per_room_limiter = Ratelimiter( + store=self.store, + clock=self.clock, + rate_hz=hs.config.ratelimiting.rc_joins_per_room.per_second, + burst_count=hs.config.ratelimiting.rc_joins_per_room.burst_count, + ) # Ratelimiter for invites, keyed by room (across all issuers, all # recipients). @@ -136,6 +153,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) self.request_ratelimiter = hs.get_request_ratelimiter() + hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room) + + def _on_user_joined_room(self, event_id: str, room_id: str) -> None: + """Notify the rate limiter that a room join has occurred. + + Use this to inform the RoomMemberHandler about joins that have either + - taken place on another homeserver, or + - on another worker in this homeserver. + Joins actioned by this worker should use the usual `ratelimit` method, which + checks the limit and increments the counter in one go. + """ + self._join_rate_per_room_limiter.record_action(requester=None, key=room_id) @abc.abstractmethod async def _remote_join( @@ -396,6 +425,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # up blocking profile updates. 
if newly_joined and ratelimit: await self._join_rate_limiter_local.ratelimit(requester) + await self._join_rate_per_room_limiter.ratelimit( + requester, key=room_id, update=False + ) result_event = await self.event_creation_handler.handle_new_client_event( requester, @@ -867,6 +899,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): await self._join_rate_limiter_remote.ratelimit( requester, ) + await self._join_rate_per_room_limiter.ratelimit( + requester, + key=room_id, + update=False, + ) inviter = await self._get_inviter(target.to_string(), room_id) if inviter and not self.hs.is_mine(inviter): diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 2f59245058..e4f2201c92 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -21,7 +21,7 @@ from twisted.internet.interfaces import IAddress, IConnector from twisted.internet.protocol import ReconnectingClientFactory from twisted.python.failure import Failure -from synapse.api.constants import EventTypes, ReceiptTypes +from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.federation import send_queue from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable @@ -219,6 +219,21 @@ class ReplicationDataHandler: membership=row.data.membership, ) + # If this event is a join, make a note of it so we have an accurate + # cross-worker room rate limit. + # TODO: Erik said we should exclude rows that came from ex_outliers + # here, but I don't see how we can determine that. I guess we could + # add a flag to row.data? + if ( + row.data.type == EventTypes.Member + and row.data.membership == Membership.JOIN + and not row.data.outlier + ): + # TODO retrieve the previous state, and exclude join -> join transitions + self.notifier.notify_user_joined_room( + row.data.event_id, row.data.room_id + ) + await self._presence_handler.process_replication_rows( stream_name, instance_name, token, rows ) diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 26f4fa7cfd..14b6705862 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -98,6 +98,7 @@ class EventsStreamEventRow(BaseEventsStreamRow): relates_to: Optional[str] membership: Optional[str] rejected: bool + outlier: bool @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 4435373146..5914a35420 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1490,7 +1490,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_all_new_forward_event_rows( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: """Returns new events, for the Events replication stream Args: @@ -1506,10 +1506,11 @@ class EventsWorkerStore(SQLBaseStore): def get_all_new_forward_event_rows( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( "SELECT e.stream_ordering, e.event_id, e.room_id, e.type," - " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL" + " se.state_key, redacts, relates_to_id, 
membership, rejections.reason IS NOT NULL," + " e.outlier" " FROM events AS e" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events AS se USING (event_id)" @@ -1523,7 +1524,8 @@ class EventsWorkerStore(SQLBaseStore): ) txn.execute(sql, (last_id, current_id, instance_name, limit)) return cast( - List[Tuple[int, str, str, str, str, str, str, str, str]], txn.fetchall() + List[Tuple[int, str, str, str, str, str, str, str, bool, bool]], + txn.fetchall(), ) return await self.db_pool.runInteraction( @@ -1532,7 +1534,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_ex_outlier_stream_rows( self, instance_name: str, last_id: int, current_id: int - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: """Returns de-outliered events, for the Events replication stream Args: @@ -1547,11 +1549,14 @@ class EventsWorkerStore(SQLBaseStore): def get_ex_outlier_stream_rows_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," - " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL" + " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," + " e.outlier" " FROM events AS e" + # NB: the next line (inner join) is what makes this query different from + # get_all_new_forward_event_rows. " INNER JOIN ex_outlier_stream AS out USING (event_id)" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events AS se USING (event_id)" @@ -1566,7 +1571,8 @@ class EventsWorkerStore(SQLBaseStore): txn.execute(sql, (last_id, current_id, instance_name)) return cast( - List[Tuple[int, str, str, str, str, str, str, str, str]], txn.fetchall() + List[Tuple[int, str, str, str, str, str, str, str, bool, bool]], + txn.fetchall(), ) return await self.db_pool.runInteraction( diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 8ea13ceb93..3a6ef221ae 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -148,7 +148,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): tok2 = self.login("fozzie", "bear") self.helper.join(self._room_id, second_member_user_id, tok=tok2) - def _make_join(self, user_id) -> JsonDict: + def _make_join(self, user_id: str) -> JsonDict: channel = self.make_signed_federation_request( "GET", f"/_matrix/federation/v1/make_join/{self._room_id}/{user_id}" @@ -260,6 +260,67 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): ) self.assertEqual(r[("m.room.member", joining_user)].membership, "join") + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 3}}) + def test_make_join_respects_room_join_rate_limit(self) -> None: + # In the test setup, two users join the room. Since the rate limiter burst + # count is 3, a new make_join request to the room should be accepted. + + joining_user = "@ronniecorbett:" + self.OTHER_SERVER_NAME + self._make_join(joining_user) + + # Now have a new local user join the room. This saturates the rate limiter + # bucket, so the next make_join should be denied. 
+ new_local_user = self.register_user("animal", "animal") + token = self.login("animal", "animal") + self.helper.join(self._room_id, new_local_user, tok=token) + + joining_user = "@ronniebarker:" + self.OTHER_SERVER_NAME + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/make_join/{self._room_id}/{joining_user}" + f"?ver={DEFAULT_ROOM_VERSION}", + ) + self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS, channel.json_body) + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 3}}) + def test_send_join_contributes_to_room_join_rate_limit_and_is_limited(self) -> None: + # Make two make_join requests up front. (These are rate limited, but do not + # contribute to the rate limit.) + join_event_dicts = [] + for i in range(2): + joining_user = f"@misspiggy{i}:{self.OTHER_SERVER_NAME}" + join_result = self._make_join(joining_user) + join_event_dict = join_result["event"] + self.add_hashes_and_signatures_from_other_server( + join_event_dict, + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + ) + join_event_dicts.append(join_event_dict) + + # In the test setup, two users join the room. Since the rate limiter burst + # count is 3, the first send_join should be accepted... + channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/send_join/{self._room_id}/join0", + content=join_event_dicts[0], + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # ... but the second should be denied. + channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/send_join/{self._room_id}/join1", + content=join_event_dicts[1], + ) + self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS, channel.json_body) + + # NB: we could write a test which checks that the send_join event is seen + # by other workers over replication, and that they update their rate limit + # buckets accordingly. I'm going to assume that the join event gets sent over + # replication, at which point the tests.handlers.room_member test + # test_local_users_joining_on_another_worker_contribute_to_rate_limit + # is probably sufficient to reassure that the bucket is updated. 
+ + def _create_acl_event(content): return make_event_from_dict(
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py new file mode 100644 index 0000000000..254e7e4b80 --- /dev/null +++ b/tests/handlers/test_room_member.py
@@ -0,0 +1,290 @@ +from http import HTTPStatus +from unittest.mock import Mock, patch + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +import synapse.rest.client.login +import synapse.rest.client.room +from synapse.api.constants import EventTypes, Membership +from synapse.api.errors import LimitExceededError +from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.events import FrozenEventV3 +from synapse.federation.federation_client import SendJoinResult +from synapse.server import HomeServer +from synapse.types import UserID, create_requester +from synapse.util import Clock + +from tests.replication._base import RedisMultiWorkerStreamTestCase +from tests.server import make_request +from tests.test_utils import make_awaitable +from tests.unittest import FederatingHomeserverTestCase, override_config + + +class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + + # Create three users. + self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + self.chris = self.register_user("chris", "pass") + self.chris_token = self.login("chris", "pass") + + # Create a room on this homeserver. Note that this counts as a join: it + # contributes to the rate limiter's count of actions. + self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.initially_unjoined_room_id = f"!example:{self.OTHER_SERVER_NAME}" + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + def test_local_user_local_joins_contribute_to_limit_and_are_limited(self) -> None: + # The rate limiter has accumulated one token from Alice's join after the create + # event. + # Try joining the room as Bob. + self.get_success( + self.handler.update_membership( + requester=create_requester(self.bob), + target=UserID.from_string(self.bob), + room_id=self.room_id, + action=Membership.JOIN, + ) + ) + + # The rate limiter bucket is full. A second join should be denied. + self.get_failure( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ), + LimitExceededError, + ) + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + def test_local_user_profile_edits_dont_contribute_to_limit(self) -> None: + # The rate limiter has accumulated one token from Alice's join after the create + # event. Alice should still be able to change her displayname. + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.alice), + room_id=self.room_id, + action=Membership.JOIN, + content={"displayname": "Alice Cooper"}, + ) + ) + + # Still room in the limiter bucket. Chris's join should be accepted.
+ self.get_success( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ) + ) + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 1}}) + def test_remote_joins_contribute_to_rate_limit(self) -> None: + # Join once, to fill the rate limiter bucket. + # + # To do this we have to mock the responses from the remote homeserver. + # We also patch out a bunch of event checks on our end. All we're really + # trying to check here is that remote joins will bump the rate limiter when + # they are persisted. + create_event_source = { + "auth_events": [], + "content": { + "creator": f"@creator:{self.OTHER_SERVER_NAME}", + "room_version": self.hs.config.server.default_room_version.identifier, + }, + "depth": 0, + "origin_server_ts": 0, + "prev_events": [], + "room_id": self.initially_unjoined_room_id, + "sender": f"@creator:{self.OTHER_SERVER_NAME}", + "state_key": "", + "type": EventTypes.Create, + } + self.add_hashes_and_signatures_from_other_server( + create_event_source, + self.hs.config.server.default_room_version, + ) + create_event = FrozenEventV3( + create_event_source, + self.hs.config.server.default_room_version, + {}, + None, + ) + + join_event_source = { + "auth_events": [create_event.event_id], + "content": {"membership": "join"}, + "depth": 1, + "origin_server_ts": 100, + "prev_events": [create_event.event_id], + "sender": self.bob, + "state_key": self.bob, + "room_id": self.initially_unjoined_room_id, + "type": EventTypes.Member, + } + add_hashes_and_signatures( + self.hs.config.server.default_room_version, + join_event_source, + self.hs.hostname, + self.hs.signing_key, + ) + join_event = FrozenEventV3( + join_event_source, + self.hs.config.server.default_room_version, + {}, + None, + ) + + mock_make_membership_event = Mock( + return_value=make_awaitable( + ( + self.OTHER_SERVER_NAME, + join_event, + self.hs.config.server.default_room_version, + ) + ) + ) + mock_send_join = Mock( + return_value=make_awaitable( + SendJoinResult( + join_event, + self.OTHER_SERVER_NAME, + state=[create_event], + auth_chain=[create_event], + partial_state=False, + servers_in_room=[], + ) + ) + ) + + with patch.object( + self.handler.federation_handler.federation_client, + "make_membership_event", + mock_make_membership_event, + ), patch.object( + self.handler.federation_handler.federation_client, + "send_join", + mock_send_join, + ), patch( + "synapse.event_auth._is_membership_change_allowed", + return_value=None, + ), patch( + "synapse.handlers.federation_event.check_state_dependent_auth_rules", + return_value=None, + ): + self.get_success( + self.handler.update_membership( + requester=create_requester(self.bob), + target=UserID.from_string(self.bob), + room_id=self.initially_unjoined_room_id, + action=Membership.JOIN, + remote_room_hosts=[self.OTHER_SERVER_NAME], + ) + ) + + # Try to join as Chris. Should get denied. + self.get_failure( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.initially_unjoined_room_id, + action=Membership.JOIN, + remote_room_hosts=[self.OTHER_SERVER_NAME], + ), + LimitExceededError, + ) + + # TODO: test that remote joins to a room are rate limited. + # Could do this by setting the burst count to 1, then: + # - remote-joining a room + # - immediately leaving + # - trying to remote-join again.
+ + +class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + + # Create three users. + self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + self.chris = self.register_user("chris", "pass") + self.chris_token = self.login("chris", "pass") + + # Create a room on this homeserver. + # Note that this counts as a join: it contributes to the rate limiter's + # count of actions. + self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + self.initially_unjoined_room_id = "!example:otherhs" + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + def test_local_users_joining_on_another_worker_contribute_to_rate_limit( + self, + ) -> None: + # The rate limiter has accumulated one token from Alice's join after the create + # event. + self.replicate() + + # Spawn another worker and have bob join via it. + worker_app = self.make_worker_hs( + "synapse.app.generic_worker", extra_config={"worker_name": "other worker"} + ) + worker_site = self._hs_to_site[worker_app] + channel = make_request( + self.reactor, + worker_site, + "POST", + f"/_matrix/client/v3/rooms/{self.room_id}/join", + access_token=self.bob_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + + # wait for join to arrive over replication + self.replicate() + + # Try to join as Chris on the worker. Should get denied because Alice + # and Bob have both joined the room. + self.get_failure( + worker_app.get_room_member_handler().update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ), + LimitExceededError, + ) + + # Try to join as Chris on the original worker. Should get denied because Alice + # and Bob have both joined the room.
+ self.get_failure( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ), + LimitExceededError, + )
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 17571b2d33..c45cb32090 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py
@@ -710,7 +710,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(43, channel.resource_usage.db_txn_count) + self.assertEqual(44, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id
@@ -723,7 +723,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(49, channel.resource_usage.db_txn_count) + self.assertEqual(50, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id
diff --git a/tests/test_server.py b/tests/test_server.py index fc4bce899c..2fe4411401 100644 --- a/tests/test_server.py +++ b/tests/test_server.py
@@ -231,7 +231,7 @@ class OptionsResourceTests(unittest.TestCase): parse_listener_def({"type": "http", "port": 0}), self.resource, "1.0", - max_request_body_size=1234, + max_request_body_size=4096, reactor=self.reactor, )
diff --git a/tests/unittest.py b/tests/unittest.py index 9f1ff774a8..66ce92f4a6 100644 --- a/tests/unittest.py +++ b/tests/unittest.py
@@ -284,7 +284,7 @@ class HomeserverTestCase(TestCase): config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", - max_request_body_size=1234, + max_request_body_size=4096, reactor=self.reactor, )
@@ -773,7 +773,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase): verify_key_id, FetchKeyResult( verify_key=verify_key, - valid_until_ts=clock.time_msec() + 1000, + valid_until_ts=clock.time_msec() + 10000, ), ) ],
diff --git a/tests/utils.py b/tests/utils.py index 424cc4c2a0..d2c6d1e852 100644 --- a/tests/utils.py +++ b/tests/utils.py
@@ -167,6 +167,7 @@ def default_config( "local": {"per_second": 10000, "burst_count": 10000}, "remote": {"per_second": 10000, "burst_count": 10000}, }, + "rc_joins_per_room": {"per_second": 10000, "burst_count": 10000}, "rc_invites": { "per_room": {"per_second": 10000, "burst_count": 10000}, "per_user": {"per_second": 10000, "burst_count": 10000},
-- cgit 1.4.1
From 87a917e8c8bd8d999e0a4f5badb58a470ab83ca2 Mon Sep 17 00:00:00 2001 From: Jörg Behrmann Date: Tue, 19 Jul 2022 14:36:29 +0200 Subject: Add notes when config options were changed to config documentation (#13314) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörg Behrmann --- changelog.d/13314.doc | 1 + docs/usage/configuration/config_documentation.md | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/13314.doc
diff --git a/changelog.d/13314.doc b/changelog.d/13314.doc new file mode 100644 index 0000000000..75c71ef27a --- /dev/null +++ b/changelog.d/13314.doc
@@ -0,0 +1 @@ +Add notes when config options were changed. Contributed by @behrmann.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index be272a400c..995c5052f1 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -239,6 +239,8 @@ If this option is provided, it parses the given yaml to json and serves it on `/.well-known/matrix/client` endpoint alongside the standard properties. +*Added in Synapse 1.62.0.* + Example configuration: ```yaml extra_well_known_client_content : @@ -1155,6 +1157,9 @@ Caching can be configured through the following sub-options: with intermittent connections, at the cost of higher memory usage. A value of zero means that sync responses are not cached. Defaults to 2m. + + *Changed in Synapse 1.62.0*: The default was changed from 0 to 2m. + * `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) -- cgit 1.4.1 From 84c5e6b1fd67931c919901c733647ff0f61e2759 Mon Sep 17 00:00:00 2001 From: villepeh <100730729+villepeh@users.noreply.github.com> Date: Tue, 19 Jul 2022 15:37:20 +0300 Subject: Bash script for creating multiple stream writers (#13271) Add another bash script to the contrib directory. It creates multiple stream writers and also prints out the example configuration for homeserver.yaml. Signed-off-by: Ville Petteri Huh. --- changelog.d/13271.doc | 1 + .../create-multiple-generic-workers.md | 31 +++++ .../create-multiple-stream-writers.md | 145 +++++++++++++++++++++ .../create-multiple-workers.md | 31 ----- 4 files changed, 177 insertions(+), 31 deletions(-) create mode 100644 changelog.d/13271.doc create mode 100644 contrib/workers-bash-scripts/create-multiple-generic-workers.md create mode 100644 contrib/workers-bash-scripts/create-multiple-stream-writers.md delete mode 100644 contrib/workers-bash-scripts/create-multiple-workers.md diff --git a/changelog.d/13271.doc b/changelog.d/13271.doc new file mode 100644 index 0000000000..b50e60d029 --- /dev/null +++ b/changelog.d/13271.doc @@ -0,0 +1 @@ +Add another `contrib` script to help set up worker processes. Contributed by @villepeh. diff --git a/contrib/workers-bash-scripts/create-multiple-generic-workers.md b/contrib/workers-bash-scripts/create-multiple-generic-workers.md new file mode 100644 index 0000000000..d303101429 --- /dev/null +++ b/contrib/workers-bash-scripts/create-multiple-generic-workers.md @@ -0,0 +1,31 @@ +# Creating multiple generic workers with a bash script + +Setting up multiple worker configuration files manually can be time-consuming. +You can alternatively create multiple worker configuration files with a simple `bash` script. For example: + +```sh +#!/bin/bash +for i in {1..5} +do +cat << EOF >> generic_worker$i.yaml +worker_app: synapse.app.generic_worker +worker_name: generic_worker$i + +# The replication listener on the main synapse process. +worker_replication_host: 127.0.0.1 +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 808$i + resources: + - names: [client, federation] + +worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml +EOF +done +``` + +This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085. + +Customise the script to your needs. 
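For what it's worth, the same files can be generated from Python rather than bash. The following is an unofficial sketch that mirrors the loop above; the template, ports and log-config path are copied from the bash example and should be adapted to your deployment:

```python
from pathlib import Path

TEMPLATE = """\
worker_app: synapse.app.generic_worker
worker_name: generic_worker{i}

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 808{i}
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
"""

for i in range(1, 6):
    # Writes generic_worker1.yaml .. generic_worker5.yaml (ports 8081-8085).
    Path(f"generic_worker{i}.yaml").write_text(TEMPLATE.format(i=i))
```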
diff --git a/contrib/workers-bash-scripts/create-multiple-stream-writers.md b/contrib/workers-bash-scripts/create-multiple-stream-writers.md new file mode 100644 index 0000000000..0d2ca780a6 --- /dev/null +++ b/contrib/workers-bash-scripts/create-multiple-stream-writers.md
@@ -0,0 +1,145 @@ +# Creating multiple stream writers with a bash script + +This script creates multiple [stream writer](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#stream-writers) workers. + +Stream writers require both replication and HTTP listeners. + +It also prints out the example lines for the Synapse main configuration file. + +Remember to route the necessary endpoints directly to the worker associated with them. + +If you run the script as-is, it will create workers with the replication listener starting from port 8034 and a regular HTTP listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array. + +```sh +#!/bin/bash + +# Start with these replication and http ports. +# The script loop starts with the exact port and then increments it by one. +REP_START_PORT=8034 +HTTP_START_PORT=8044 + +# Stream writer workers to generate. Feel free to add or remove them as you wish. +# Event persister ("events") isn't included here as it does not require its +# own HTTP listener. + +STREAM_WRITERS+=( "presence" "typing" "receipts" "to_device" "account_data" ) + +NUM_WRITERS=$(expr ${#STREAM_WRITERS[@]}) + +i=0 + +while [ $i -lt "$NUM_WRITERS" ] +do +cat << EOF > ${STREAM_WRITERS[$i]}_stream_writer.yaml +worker_app: synapse.app.generic_worker +worker_name: ${STREAM_WRITERS[$i]}_stream_writer + +# The replication listener on the main synapse process. +worker_replication_host: 127.0.0.1 +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: $(expr $REP_START_PORT + $i) + resources: + - names: [replication] + + - type: http + port: $(expr $HTTP_START_PORT + $i) + resources: + - names: [client] + +worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml +EOF +HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer: + host: 127.0.0.1 + port: $(expr $REP_START_PORT + $i) +" + +HOMESERVER_YAML_STREAM_WRITERS+=$" ${STREAM_WRITERS[$i]}: ${STREAM_WRITERS[$i]}_stream_writer +" + +((i++)) +done + +cat << EXAMPLECONFIG +# Add these lines to your homeserver.yaml. +# Don't forget to configure your reverse proxy and +# necessary endpoints to their respective worker. + +# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md +# for more information. + +# Remember: Under NO circumstances should the replication +# listener be exposed to the public internet; +# it has no authentication and is unencrypted. + +instance_map: +$HOMESERVER_YAML_INSTANCE_MAP +stream_writers: +$HOMESERVER_YAML_STREAM_WRITERS +EXAMPLECONFIG +``` + +Copy the code above and save it to a file ```create_stream_writers.sh``` (for example). + +Make the script executable by running ```chmod +x create_stream_writers.sh```. + +## Run the script to create workers and print out a sample configuration + +Simply run the script to create YAML files in the current folder and print out the required configuration for ```homeserver.yaml```. + +```console +$ ./create_stream_writers.sh + +# Add these lines to your homeserver.yaml. +# Don't forget to configure your reverse proxy and +# necessary endpoints to their respective worker.
+ +# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md +# for more information. + +# Remember: Under NO circumstances should the replication +# listener be exposed to the public internet; +# it has no authentication and is unencrypted. + +instance_map: + presence_stream_writer: + host: 127.0.0.1 + port: 8034 + typing_stream_writer: + host: 127.0.0.1 + port: 8035 + receipts_stream_writer: + host: 127.0.0.1 + port: 8036 + to_device_stream_writer: + host: 127.0.0.1 + port: 8037 + account_data_stream_writer: + host: 127.0.0.1 + port: 8038 + +stream_writers: + presence: presence_stream_writer + typing: typing_stream_writer + receipts: receipts_stream_writer + to_device: to_device_stream_writer + account_data: account_data_stream_writer +``` + +Simply copy-and-paste the output to an appropriate place in your Synapse main configuration file. + +## Write directly to Synapse configuration file + +You could also write the output directly to the homeserver's main configuration file. **This, however, is not recommended** as even a small typo (such as replacing >> with >) can erase the entire ```homeserver.yaml```. + +If you do this, back up your original configuration file first: + +```console +# Back up homeserver.yaml first +cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak + +# Create workers and write output to your homeserver.yaml +./create_stream_writers.sh >> /etc/matrix-synapse/homeserver.yaml +```
diff --git a/contrib/workers-bash-scripts/create-multiple-workers.md b/contrib/workers-bash-scripts/create-multiple-workers.md deleted file mode 100644 index ad5142fe15..0000000000 --- a/contrib/workers-bash-scripts/create-multiple-workers.md +++ /dev/null
@@ -1,31 +0,0 @@ -# Creating multiple workers with a bash script - -Setting up multiple worker configuration files manually can be time-consuming. -You can alternatively create multiple worker configuration files with a simple `bash` script. For example: - -```sh -#!/bin/bash -for i in {1..5} -do -cat << EOF >> generic_worker$i.yaml -worker_app: synapse.app.generic_worker -worker_name: generic_worker$i - -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - -worker_listeners: - - type: http - port: 808$i - resources: - - names: [client, federation] - -worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml -EOF -done -``` - -This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085. - -Customise the script to your needs.
-- cgit 1.4.1 From 6faaf76a3239c3fd1a9adc0b45b04fcf43237824 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 19 Jul 2022 13:38:29 +0100 Subject: Remove 'anonymised' from the phone home stats documentation (#13321) --- CHANGES.md | 2 +- changelog.d/13321.doc | 1 + debian/changelog | 7 ++ debian/matrix-synapse-py3.postinst | 2 +- debian/po/templates.pot | 12 ++-- debian/templates | 13 ++-- docs/SUMMARY.md | 2 +- .../monitoring/reporting_anonymised_statistics.md | 81 ---------------------- .../reporting_homeserver_usage_statistics.md | 81 ++++++++++++++++++++++ docs/usage/configuration/config_documentation.md | 11 ++- synapse/_scripts/generate_config.py | 2 +- synapse/config/_base.py | 14 ++-- 12 files changed, 121 insertions(+), 107 deletions(-) create mode 100644 changelog.d/13321.doc delete mode 100644 docs/usage/administration/monitoring/reporting_anonymised_statistics.md create mode 100644 docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md diff --git a/CHANGES.md b/CHANGES.md index 4071f973de..a1f918986f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -34,7 +34,7 @@ Improved Documentation - Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029)) - Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032)) - Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166)) -- Add documentation for anonymised homeserver statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086)) +- Add documentation for homeserver usage statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086)) - Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212)) - Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139)) - Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132)) diff --git a/changelog.d/13321.doc b/changelog.d/13321.doc new file mode 100644 index 0000000000..a7469e9242 --- /dev/null +++ b/changelog.d/13321.doc @@ -0,0 +1 @@ +Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. diff --git a/debian/changelog b/debian/changelog index 9f4352586d..a9dbe34705 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +matrix-synapse-py3 (1.63.0~rc1+nmu1) UNRELEASED; urgency=medium + + * Clarify that homeserver server names are included in the data reported + by opt-in server stats reporting (`report_stats` homeserver config option). + + -- Synapse Packaging team Tue, 19 Jul 2022 12:00:14 +0100 + matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium * New Synapse release 1.63.0rc1. diff --git a/debian/matrix-synapse-py3.postinst b/debian/matrix-synapse-py3.postinst index a8dde1e082..029b9e0243 100644 --- a/debian/matrix-synapse-py3.postinst +++ b/debian/matrix-synapse-py3.postinst @@ -31,7 +31,7 @@ EOF # This file is autogenerated, and will be recreated on upgrade if it is deleted. 
# Any changes you make will be preserved. -# Whether to report anonymized homeserver usage statistics. +# Whether to report homeserver usage statistics. report_stats: false EOF fi diff --git a/debian/po/templates.pot b/debian/po/templates.pot index f0af9e70fb..445e4aac81 100644 --- a/debian/po/templates.pot +++ b/debian/po/templates.pot @@ -37,7 +37,7 @@ msgstr "" #. Type: boolean #. Description #: ../templates:2001 -msgid "Report anonymous statistics?" +msgid "Report homeserver usage statistics?" msgstr "" #. Type: boolean @@ -45,11 +45,11 @@ msgstr "" #: ../templates:2001 msgid "" "Developers of Matrix and Synapse really appreciate helping the project out " -"by reporting anonymized usage statistics from this homeserver. Only very " -"basic aggregate data (e.g. number of users) will be reported, but it helps " -"track the growth of the Matrix community, and helps in making Matrix a " -"success, as well as to convince other networks that they should peer with " -"Matrix." +"by reporting homeserver usage statistics from this homeserver. Your " +"homeserver's server name, along with very basic aggregate data (e.g. " +"number of users) will be reported. But it helps track the growth of the " +"Matrix community, and helps in making Matrix a success, as well as to " +"convince other networks that they should peer with Matrix." msgstr "" #. Type: boolean diff --git a/debian/templates b/debian/templates index 458fe8bbe9..23e24e1059 100644 --- a/debian/templates +++ b/debian/templates @@ -10,12 +10,13 @@ _Description: Name of the server: Template: matrix-synapse/report-stats Type: boolean Default: false -_Description: Report anonymous statistics? +_Description: Report homeserver usage statistics? Developers of Matrix and Synapse really appreciate helping the - project out by reporting anonymized usage statistics from this - homeserver. Only very basic aggregate data (e.g. number of users) - will be reported, but it helps track the growth of the Matrix - community, and helps in making Matrix a success, as well as to - convince other networks that they should peer with Matrix. + project out by reporting homeserver usage statistics from this + homeserver. Your homeserver's server name, along with very basic + aggregate data (e.g. number of users) will be reported. But it + helps track the growth of the Matrix community, and helps in + making Matrix a success, as well as to convince other networks + that they should peer with Matrix. . Thank you. 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8d6030e34a..2e8c4e2137 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -69,7 +69,7 @@ - [Federation](usage/administration/admin_api/federation.md) - [Manhole](manhole.md) - [Monitoring](metrics-howto.md) - - [Reporting Anonymised Statistics](usage/administration/monitoring/reporting_anonymised_statistics.md) + - [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md) - [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md) - [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md) - [Database Maintenance Tools](usage/administration/database_maintenance_tools.md) diff --git a/docs/usage/administration/monitoring/reporting_anonymised_statistics.md b/docs/usage/administration/monitoring/reporting_anonymised_statistics.md deleted file mode 100644 index 4f1e0fecf5..0000000000 --- a/docs/usage/administration/monitoring/reporting_anonymised_statistics.md +++ /dev/null @@ -1,81 +0,0 @@ -# Reporting Anonymised Statistics - -When generating your Synapse configuration file, you are asked whether you -would like to report anonymised statistics to Matrix.org. These statistics -provide the foundation a glimpse into the number of Synapse homeservers -participating in the network, as well as statistics such as the number of -rooms being created and messages being sent. This feature is sometimes -affectionately called "phone-home" stats. Reporting -[is optional](../../configuration/config_documentation.md#report_stats) -and the reporting endpoint -[can be configured](../../configuration/config_documentation.md#report_stats_endpoint), -in case you would like to instead report statistics from a set of homeservers -to your own infrastructure. - -This documentation aims to define the statistics available and the -homeserver configuration options that exist to tweak it. - -## Available Statistics - -The following statistics are sent to the configured reporting endpoint: - -| Statistic Name | Type | Description | -|----------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `memory_rss` | int | The memory usage of the process (in kilobytes on Unix-based systems, bytes on MacOS). | -| `cpu_average` | int | CPU time in % of a single core (not % of all cores). | -| `homeserver` | string | The homeserver's server name. | -| `server_context` | string | An arbitrary string used to group statistics from a set of homeservers. | -| `timestamp` | int | The current time, represented as the number of seconds since the epoch. | -| `uptime_seconds` | int | The number of seconds since the homeserver was last started. | -| `python_version` | string | The Python version number in use (e.g "3.7.1"). Taken from `sys.version_info`. | -| `total_users` | int | The number of registered users on the homeserver. | -| `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. | -| `daily_user_type_native` | int | The number of native users created in the last 24 hours. | -| `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. 
| -| `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. | -| `total_room_count` | int | The total number of rooms present on the homeserver. | -| `daily_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 24 hours. | -| `monthly_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 30 days. | -| `daily_active_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.message` sent in them in the last 24 hours. | -| `daily_active_e2ee_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.encrypted` sent in them in the last 24 hours. | -| `daily_messages` | int | The number of (state) events with the type `m.room.message` seen in the last 24 hours. | -| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. | -| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. | -| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. | -| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. | -| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. | -| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. | -| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. | -| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. | -| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. | -| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. | -| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. | -| `r30v2_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "electron" (case-insensitive) in the user agent string. | -| `r30v2_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "mozilla" or "gecko" (case-insensitive) in the user agent string. | -| `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. 
| `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. | -| `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. | -| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.0, and "3.38.5" for SQLite 3.38.5 installed on the system. | -| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. | - - -[^1]: Native matrix users and guests are always counted. If the -[`track_puppeted_user_ips`](../../configuration/config_documentation.md#track_puppeted_user_ips) -option is set to `true`, "puppeted" users (users that an Application Service have performed -[an action on behalf of](https://spec.matrix.org/v1.3/application-service-api/#identity-assertion)) -will also be counted. Note that an Application Service can "puppet" any user in their -[user namespace](https://spec.matrix.org/v1.3/application-service-api/#registration), -not only users that the Application Service has created. If this happens, the Application Service -will additionally be counted as a user (irrespective of `track_puppeted_user_ips`). - -## Using a Custom Statistics Collection Server - -If statistics reporting is enabled, the endpoint that Synapse sends metrics to is configured by the -[`report_stats_endpoint`](../../configuration/config_documentation.md#report_stats_endpoint) config -option. By default, statistics are sent to Matrix.org. - -If you would like to set up your own statistics collection server and send metrics there, you may -consider using one of the following known implementations: - -* [Matrix.org's Panopticon](https://github.com/matrix-org/panopticon) -* [Famedly's Barad-dûr](https://gitlab.com/famedly/company/devops/services/barad-dur)
diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md new file mode 100644 index 0000000000..4e53f9883a --- /dev/null +++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
@@ -0,0 +1,81 @@ +# Reporting Homeserver Usage Statistics + +When generating your Synapse configuration file, you are asked whether you +would like to report usage statistics to Matrix.org. These statistics +provide a glimpse into the number of Synapse homeservers +participating in the network, as well as statistics such as the number of +rooms being created and messages being sent. This feature is sometimes +affectionately called "phone home" stats. Reporting +[is optional](../../configuration/config_documentation.md#report_stats) +and the reporting endpoint +[can be configured](../../configuration/config_documentation.md#report_stats_endpoint), +in case you would like to instead report statistics from a set of homeservers +to your own infrastructure. + +This documentation aims to define the statistics available and the +homeserver configuration options that exist to tweak it.
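As an aside, if you do point the reporting endpoint at your own infrastructure (see the custom collection server section below), a very small collector suffices. The sketch below assumes each report arrives as a JSON request body over plain HTTP; it accepts both PUT and POST, since the exact method used is an implementation detail of your Synapse version and worth verifying:

```python
import json
from http.server import BaseHTTPRequestHandler, HTTPServer


class StatsCollector(BaseHTTPRequestHandler):
    def do_PUT(self) -> None:
        length = int(self.headers.get("Content-Length", 0))
        stats = json.loads(self.rfile.read(length))
        # `homeserver` and `total_users` are among the fields documented below.
        print(stats.get("homeserver"), stats.get("total_users"))
        self.send_response(200)
        self.end_headers()

    do_POST = do_PUT  # accept either method


if __name__ == "__main__":
    # Point `report_stats_endpoint` at http://<host>:8000/ for this toy collector.
    HTTPServer(("0.0.0.0", 8000), StatsCollector).serve_forever()
```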
+ +## Available Statistics + +The following statistics are sent to the configured reporting endpoint: + +| Statistic Name | Type | Description | +|----------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `homeserver` | string | The homeserver's server name. | +| `memory_rss` | int | The memory usage of the process (in kilobytes on Unix-based systems, bytes on MacOS). | +| `cpu_average` | int | CPU time in % of a single core (not % of all cores). | +| `server_context` | string | An arbitrary string used to group statistics from a set of homeservers. | +| `timestamp` | int | The current time, represented as the number of seconds since the epoch. | +| `uptime_seconds` | int | The number of seconds since the homeserver was last started. | +| `python_version` | string | The Python version number in use (e.g "3.7.1"). Taken from `sys.version_info`. | +| `total_users` | int | The number of registered users on the homeserver. | +| `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. | +| `daily_user_type_native` | int | The number of native users created in the last 24 hours. | +| `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. | +| `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. | +| `total_room_count` | int | The total number of rooms present on the homeserver. | +| `daily_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 24 hours. | +| `monthly_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 30 days. | +| `daily_active_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.message` sent in them in the last 24 hours. | +| `daily_active_e2ee_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.encrypted` sent in them in the last 24 hours. | +| `daily_messages` | int | The number of (state) events with the type `m.room.message` seen in the last 24 hours. | +| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. | +| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. | +| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. | +| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. | +| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. | +| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. | +| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. 
| +| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. | +| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. | +| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. | +| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. | +| `r30v2_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "electron" (case-insensitive) in the user agent string. | +| `r30v2_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "mozilla" or "gecko" (case-insensitive) in the user agent string. | +| `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. | +| `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. | +| `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. | +| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.10, and "3.38.5" for SQLite 3.38.5 installed on the system. | +| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. | + + +[^1]: Native matrix users and guests are always counted. If the +[`track_puppeted_user_ips`](../../configuration/config_documentation.md#track_puppeted_user_ips) +option is set to `true`, "puppeted" users (users that an Application Service has performed +[an action on behalf of](https://spec.matrix.org/v1.3/application-service-api/#identity-assertion)) +will also be counted. Note that an Application Service can "puppet" any user in their +[user namespace](https://spec.matrix.org/v1.3/application-service-api/#registration), +not only users that the Application Service has created. If this happens, the Application Service +will additionally be counted as a user (irrespective of `track_puppeted_user_ips`). + +## Using a Custom Statistics Collection Server + +If statistics reporting is enabled, the endpoint that Synapse sends metrics to is configured by the +[`report_stats_endpoint`](../../configuration/config_documentation.md#report_stats_endpoint) config +option. By default, statistics are sent to Matrix.org.
+ +If you would like to set up your own statistics collection server and send metrics there, you may +consider using one of the following known implementations: + +* [Matrix.org's Panopticon](https://github.com/matrix-org/panopticon) +* [Famedly's Barad-dûr](https://gitlab.com/famedly/company/devops/services/barad-dur) diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 5deabb53d7..6d7d700a13 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2409,9 +2409,14 @@ metrics_flags: --- ### `report_stats` -Whether or not to report anonymized homeserver usage statistics. This is originally +Whether or not to report homeserver usage statistics. This is originally set when generating the config. Set this option to true or false to change the current -behavior. +behavior. See +[Reporting Homeserver Usage Statistics](../administration/monitoring/reporting_homeserver_usage_statistics.md) +for information on what data is reported. + +Statistics will be reported 5 minutes after Synapse starts, and then every 3 hours +after that. Example configuration: ```yaml @@ -2420,7 +2425,7 @@ report_stats: true --- ### `report_stats_endpoint` -The endpoint to report the anonymized homeserver usage statistics to. +The endpoint to report homeserver usage statistics to. Defaults to https://matrix.org/report-usage-stats/push Example configuration: diff --git a/synapse/_scripts/generate_config.py b/synapse/_scripts/generate_config.py index 08eb8ef114..06c11c60da 100755 --- a/synapse/_scripts/generate_config.py +++ b/synapse/_scripts/generate_config.py @@ -33,7 +33,7 @@ def main() -> None: parser.add_argument( "--report-stats", action="store", - help="Whether the generated config reports anonymized usage statistics", + help="Whether the generated config reports homeserver usage statistics", choices=["yes", "no"], ) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 095eca16c5..7c9cf403ef 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -97,16 +97,16 @@ def format_config_error(e: ConfigError) -> Iterator[str]: # We split these messages out to allow packages to override with package # specific instructions. MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\ -Please opt in or out of reporting anonymized homeserver usage statistics, by -setting the `report_stats` key in your config file to either True or False. +Please opt in or out of reporting homeserver usage statistics, by setting +the `report_stats` key in your config file to either True or False. """ MISSING_REPORT_STATS_SPIEL = """\ We would really appreciate it if you could help our project out by reporting -anonymized usage statistics from your homeserver. Only very basic aggregate -data (e.g. number of users) will be reported, but it helps us to track the -growth of the Matrix community, and helps us to make Matrix a success, as well -as to convince other networks that they should peer with us. +homeserver usage statistics from your homeserver. Your homeserver's server name, +along with very basic aggregate data (e.g. number of users), will be reported. This +helps us to track the growth of the Matrix community, and helps us to make Matrix +a success, as well as to convince other networks that they should peer with us. Thank you.
""" @@ -621,7 +621,7 @@ class RootConfig: generate_group.add_argument( "--report-stats", action="store", - help="Whether the generated config reports anonymized usage statistics.", + help="Whether the generated config reports homeserver usage statistics.", choices=["yes", "no"], ) generate_group.add_argument( -- cgit 1.4.1 From 097afd0e0b49099cae2f5c03abc15f3a66894405 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Jul 2022 14:43:28 +0200 Subject: 1.63.0 --- CHANGES.md | 9 +++++++++ changelog.d/13321.doc | 1 - debian/changelog | 5 +++-- pyproject.toml | 2 +- 4 files changed, 13 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/13321.doc diff --git a/CHANGES.md b/CHANGES.md index a1f918986f..c8da19676c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.63.0 (2022-07-19) +=========================== + +Improved Documentation +---------------------- + +- Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. ([\#13321](https://github.com/matrix-org/synapse/issues/13321)) + + Synapse 1.63.0rc1 (2022-07-12) ============================== diff --git a/changelog.d/13321.doc b/changelog.d/13321.doc deleted file mode 100644 index a7469e9242..0000000000 --- a/changelog.d/13321.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. diff --git a/debian/changelog b/debian/changelog index a9dbe34705..b1635bc392 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,10 @@ -matrix-synapse-py3 (1.63.0~rc1+nmu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.63.0) stable; urgency=medium * Clarify that homeserver server names are included in the data reported by opt-in server stats reporting (`report_stats` homeserver config option). + * New Synapse release 1.63.0. - -- Synapse Packaging team Tue, 19 Jul 2022 12:00:14 +0100 + -- Synapse Packaging team Tue, 19 Jul 2022 14:42:24 +0200 matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium diff --git a/pyproject.toml b/pyproject.toml index f77c02ca27..74a473bd34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.63.0rc1" +version = "1.63.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 6fccd72f42ac76b185cc20ed258792ff94ee81fb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Jul 2022 14:53:12 +0200 Subject: Improve precision on validation improvements --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c8da19676c..7eac6131b1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,7 +15,7 @@ Features - Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125)) - Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) -- Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148)) +- Improve validation logic in the account data REST endpoints. 
([\#13148](https://github.com/matrix-org/synapse/issues/13148)) Bugfixes -- cgit 1.4.1 From 1efe6b8c41082fca68a6c408a854b25991814d65 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 19 Jul 2022 09:08:46 -0400 Subject: Stop building Ubuntu 21.10 (Impish Indri) which is end of life. (#13326) --- changelog.d/13326.removal | 1 + scripts-dev/build_debian_packages.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/13326.removal diff --git a/changelog.d/13326.removal b/changelog.d/13326.removal new file mode 100644 index 0000000000..8112286671 --- /dev/null +++ b/changelog.d/13326.removal @@ -0,0 +1 @@ +Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 38564893e9..cd2e64b75f 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -26,7 +26,6 @@ DISTS = ( "debian:bookworm", "debian:sid", "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) - "ubuntu:impish", # 21.10 (EOL 2022-07) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) ) -- cgit 1.4.1 From de70b25e844035c26a1bc40fbf6c8982e5d37b45 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 19 Jul 2022 14:40:37 +0100 Subject: Reduce memory usage of state group cache (#13323) --- changelog.d/13323.misc | 1 + synapse/storage/databases/state/bg_updates.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13323.misc diff --git a/changelog.d/13323.misc b/changelog.d/13323.misc new file mode 100644 index 0000000000..3caa94a2f6 --- /dev/null +++ b/changelog.d/13323.misc @@ -0,0 +1 @@ +Reduce memory usage of state caches. diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index fa9eadaca7..a7fcc564a9 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -24,6 +24,7 @@ from synapse.storage.database import ( from synapse.storage.engines import PostgresEngine from synapse.storage.state import StateFilter from synapse.types import MutableStateMap, StateMap +from synapse.util.caches import intern_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -136,7 +137,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): txn.execute(sql % (where_clause,), args) for row in txn: typ, state_key, event_id = row - key = (typ, state_key) + key = (intern_string(typ), intern_string(state_key)) results[group][key] = event_id else: max_entries_returned = state_filter.max_entries_returned() -- cgit 1.4.1 From a6895dd576f96d7fd086fb4128d48ac8a3f098c5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 19 Jul 2022 14:14:30 -0400 Subject: Add type annotations to `trace` decorator. (#13328) Functions that are decorated with `trace` are now properly typed and the type hints for them are fixed.
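A quick sketch of how the two decorators are used after this change, based on the docstrings in the diff below (the decorated function names here are illustrative):

```python
from synapse.logging.opentracing import trace, trace_with_opname


@trace  # the span's operation name defaults to the function's name
async def do_something_usual_and_useful() -> None:
    ...


@trace_with_opname("a_better_operation_name")  # operation name set explicitly
async def interesting_badly_named_function() -> None:
    ...
```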
--- changelog.d/13328.misc | 1 + synapse/federation/federation_client.py | 2 +- synapse/federation/transport/client.py | 2 +- synapse/handlers/e2e_keys.py | 16 ++++---- synapse/logging/opentracing.py | 50 +++++++++++++---------- synapse/replication/http/_base.py | 4 +- synapse/rest/client/keys.py | 4 +- synapse/rest/client/room_keys.py | 13 +++--- synapse/rest/client/sendtodevice.py | 4 +- synapse/rest/client/sync.py | 12 +++--- synapse/storage/databases/main/devices.py | 2 +- synapse/storage/databases/main/end_to_end_keys.py | 47 ++++++++++++++++++--- 12 files changed, 102 insertions(+), 55 deletions(-) create mode 100644 changelog.d/13328.misc diff --git a/changelog.d/13328.misc b/changelog.d/13328.misc new file mode 100644 index 0000000000..d15fb5fc37 --- /dev/null +++ b/changelog.d/13328.misc @@ -0,0 +1 @@ +Add type hints to `trace` decorator. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 66e6305562..7c450ecad0 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -217,7 +217,7 @@ class FederationClient(FederationBase): ) async def claim_client_keys( - self, destination: str, content: JsonDict, timeout: int + self, destination: str, content: JsonDict, timeout: Optional[int] ) -> JsonDict: """Claims one-time keys for a device hosted on a remote server. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 9e84bd677e..32074b8ca6 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -619,7 +619,7 @@ class TransportLayerClient: ) async def claim_client_keys( - self, destination: str, query_content: JsonDict, timeout: int + self, destination: str, query_content: JsonDict, timeout: Optional[int] ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 52bb5c9c55..84c28c480e 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple import attr from canonicaljson import encode_canonical_json @@ -92,7 +92,11 @@ class E2eKeysHandler: @trace async def query_devices( - self, query_body: JsonDict, timeout: int, from_user_id: str, from_device_id: str + self, + query_body: JsonDict, + timeout: int, + from_user_id: str, + from_device_id: Optional[str], ) -> JsonDict: """Handle a device key query from a client @@ -120,9 +124,7 @@ class E2eKeysHandler: the number of in-flight queries at a time. """ async with self._query_devices_linearizer.queue((from_user_id, from_device_id)): - device_keys_query: Dict[str, Iterable[str]] = query_body.get( - "device_keys", {} - ) + device_keys_query: Dict[str, List[str]] = query_body.get("device_keys", {}) # separate users by domain. 
# make a map from domain to user_id to device_ids @@ -392,7 +394,7 @@ class E2eKeysHandler: @trace async def query_local_devices( - self, query: Dict[str, Optional[List[str]]] + self, query: Mapping[str, Optional[List[str]]] ) -> Dict[str, Dict[str, dict]]: """Get E2E device keys for local users @@ -461,7 +463,7 @@ class E2eKeysHandler: @trace async def claim_one_time_keys( - self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: int + self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int] ) -> JsonDict: local_query: List[Tuple[str, str, str]] = [] remote_queries: Dict[str, Dict[str, Dict[str, str]]] = {} diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 50c57940f9..17e729f0c7 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -84,14 +84,13 @@ the function becomes the operation name for the span. return something_usual_and_useful -Operation names can be explicitly set for a function by passing the -operation name to ``trace`` +Operation names can be explicitly set for a function by using ``trace_with_opname``: .. code-block:: python - from synapse.logging.opentracing import trace + from synapse.logging.opentracing import trace_with_opname - @trace(opname="a_better_operation_name") + @trace_with_opname("a_better_operation_name") def interesting_badly_named_function(*args, **kwargs): # Does all kinds of cool and expected things return something_usual_and_useful @@ -798,33 +797,31 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte # Tracing decorators -def trace(func=None, opname: Optional[str] = None): +def trace_with_opname(opname: str) -> Callable[[Callable[P, R]], Callable[P, R]]: """ - Decorator to trace a function. - Sets the operation name to that of the function's or that given - as operation_name. See the module's doc string for usage - examples. + Decorator to trace a function with a custom opname. + + See the module's doc string for usage examples. + """ - def decorator(func): + def decorator(func: Callable[P, R]) -> Callable[P, R]: if opentracing is None: return func # type: ignore[unreachable] - _opname = opname if opname else func.__name__ - if inspect.iscoroutinefunction(func): @wraps(func) - async def _trace_inner(*args, **kwargs): - with start_active_span(_opname): - return await func(*args, **kwargs) + async def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R: + with start_active_span(opname): + return await func(*args, **kwargs) # type: ignore[misc] else: # The other case here handles both sync functions and those # decorated with inlineDeferred. @wraps(func) - def _trace_inner(*args, **kwargs): - scope = start_active_span(_opname) + def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R: + scope = start_active_span(opname) scope.__enter__() try: @@ -858,12 +855,21 @@ def trace(func=None, opname: Optional[str] = None): scope.__exit__(type(e), None, e.__traceback__) raise - return _trace_inner + return _trace_inner # type: ignore[return-value] - if func: - return decorator(func) - else: - return decorator + return decorator + + +def trace(func: Callable[P, R]) -> Callable[P, R]: + """ + Decorator to trace a function. + + Sets the operation name to that of the function's name. + + See the module's doc string for usage examples. 
+ """ + + return trace_with_opname(func.__name__)(func) def tag_args(func: Callable[P, R]) -> Callable[P, R]: diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index a4ae4040c3..561ad5bf04 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -29,7 +29,7 @@ from synapse.http import RequestTimedOutError from synapse.http.server import HttpServer, is_method_cancellable from synapse.http.site import SynapseRequest from synapse.logging import opentracing -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import trace_with_opname from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string @@ -196,7 +196,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): "ascii" ) - @trace(opname="outgoing_replication_request") + @trace_with_opname("outgoing_replication_request") async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index ce806e3c11..eb1b85721f 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -26,7 +26,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname from synapse.types import JsonDict, StreamToken from ._base import client_patterns, interactive_auth_handler @@ -71,7 +71,7 @@ class KeyUploadServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() self.device_handler = hs.get_device_handler() - @trace(opname="upload_keys") + @trace_with_opname("upload_keys") async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py index 37e39570f6..f7081f638e 100644 --- a/synapse/rest/client/room_keys.py +++ b/synapse/rest/client/room_keys.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional, Tuple, cast from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer @@ -127,7 +127,7 @@ class RoomKeysServlet(RestServlet): requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) - version = parse_string(request, "version") + version = parse_string(request, "version", required=True) if session_id: body = {"sessions": {session_id: body}} @@ -196,8 +196,11 @@ class RoomKeysServlet(RestServlet): user_id = requester.user.to_string() version = parse_string(request, "version", required=True) - room_keys = await self.e2e_room_keys_handler.get_room_keys( - user_id, version, room_id, session_id + room_keys = cast( + JsonDict, + await self.e2e_room_keys_handler.get_room_keys( + user_id, version, room_id, session_id + ), ) # Convert room_keys to the right format to return. 
@@ -240,7 +243,7 @@ class RoomKeysServlet(RestServlet): requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = parse_string(request, "version") + version = parse_string(request, "version", required=True) ret = await self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py index 3322c8ef48..1a8e9a96d4 100644 --- a/synapse/rest/client/sendtodevice.py +++ b/synapse/rest/client/sendtodevice.py @@ -19,7 +19,7 @@ from synapse.http import servlet from synapse.http.server import HttpServer from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import set_tag, trace +from synapse.logging.opentracing import set_tag, trace_with_opname from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict @@ -43,7 +43,7 @@ class SendToDeviceRestServlet(servlet.RestServlet): self.txns = HttpTransactionCache(hs) self.device_message_handler = hs.get_device_message_handler() - @trace(opname="sendToDevice") + @trace_with_opname("sendToDevice") def on_PUT( self, request: SynapseRequest, message_type: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 8bbf35148d..c2989765ce 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -37,7 +37,7 @@ from synapse.handlers.sync import ( from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import trace_with_opname from synapse.types import JsonDict, StreamToken from synapse.util import json_decoder @@ -210,7 +210,7 @@ class SyncRestServlet(RestServlet): logger.debug("Event formatting complete") return 200, response_content - @trace(opname="sync.encode_response") + @trace_with_opname("sync.encode_response") async def encode_response( self, time_now: int, @@ -315,7 +315,7 @@ class SyncRestServlet(RestServlet): ] } - @trace(opname="sync.encode_joined") + @trace_with_opname("sync.encode_joined") async def encode_joined( self, rooms: List[JoinedSyncResult], @@ -340,7 +340,7 @@ class SyncRestServlet(RestServlet): return joined - @trace(opname="sync.encode_invited") + @trace_with_opname("sync.encode_invited") async def encode_invited( self, rooms: List[InvitedSyncResult], @@ -371,7 +371,7 @@ class SyncRestServlet(RestServlet): return invited - @trace(opname="sync.encode_knocked") + @trace_with_opname("sync.encode_knocked") async def encode_knocked( self, rooms: List[KnockedSyncResult], @@ -420,7 +420,7 @@ class SyncRestServlet(RestServlet): return knocked - @trace(opname="sync.encode_archived") + @trace_with_opname("sync.encode_archived") async def encode_archived( self, rooms: List[ArchivedSyncResult], diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index adde5d0978..7a6ed332aa 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -669,7 +669,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): @trace async def get_user_devices_from_cache( - self, query_list: List[Tuple[str, str]] + self, query_list: List[Tuple[str, Optional[str]]] ) -> Tuple[Set[str], 
Dict[str, Dict[str, JsonDict]]]: """Get the devices (and keys if any) for remote users from the cache. diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 9b293475c8..60f622ad71 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -22,11 +22,14 @@ from typing import ( List, Optional, Tuple, + Union, cast, + overload, ) import attr from canonicaljson import encode_canonical_json +from typing_extensions import Literal from synapse.api.constants import DeviceKeyAlgorithms from synapse.appservice import ( @@ -113,7 +116,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker user_devices = devices[user_id] results = [] for device_id, device in user_devices.items(): - result = {"device_id": device_id} + result: JsonDict = {"device_id": device_id} keys = device.keys if keys: @@ -156,6 +159,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker rv[user_id] = {} for device_id, device_info in device_keys.items(): r = device_info.keys + if r is None: + continue + r["unsigned"] = {} display_name = device_info.display_name if display_name is not None: @@ -164,13 +170,42 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker return rv + @overload + async def get_e2e_device_keys_and_signatures( + self, + query_list: Collection[Tuple[str, Optional[str]]], + include_all_devices: Literal[False] = False, + ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]: + ... + + @overload + async def get_e2e_device_keys_and_signatures( + self, + query_list: Collection[Tuple[str, Optional[str]]], + include_all_devices: bool = False, + include_deleted_devices: Literal[False] = False, + ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]: + ... + + @overload + async def get_e2e_device_keys_and_signatures( + self, + query_list: Collection[Tuple[str, Optional[str]]], + include_all_devices: Literal[True], + include_deleted_devices: Literal[True], + ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]: + ... 
+ @trace async def get_e2e_device_keys_and_signatures( self, - query_list: List[Tuple[str, Optional[str]]], + query_list: Collection[Tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: bool = False, - ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]: + ) -> Union[ + Dict[str, Dict[str, DeviceKeyLookupResult]], + Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]], + ]: """Fetch a list of device keys Any cross-signatures made on the keys by the owner of the device are also @@ -1044,7 +1079,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple db_autocommit = False - row = await self.db_pool.runInteraction( + claim_row = await self.db_pool.runInteraction( "claim_e2e_one_time_keys", _claim_e2e_one_time_key, user_id, @@ -1052,11 +1087,11 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker algorithm, db_autocommit=db_autocommit, ) - if row: + if claim_row: device_results = results.setdefault(user_id, {}).setdefault( device_id, {} ) - device_results[row[0]] = row[1] + device_results[claim_row[0]] = claim_row[1] continue # No one-time key available, so see if there's a fallback -- cgit 1.4.1 From 172ce29b149afb91bbf728b88bffb117172a8f2c Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 19 Jul 2022 19:15:54 +0100 Subject: Fix spurious warning when fetching state after a missing prev event (#13258) --- changelog.d/13258.misc | 1 + synapse/handlers/federation_event.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/13258.misc diff --git a/changelog.d/13258.misc b/changelog.d/13258.misc new file mode 100644 index 0000000000..a187c46aa6 --- /dev/null +++ b/changelog.d/13258.misc @@ -0,0 +1 @@ +Fix spurious warning when fetching state after a missing prev event. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 766d9849f5..e4a5b64d10 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1037,6 +1037,9 @@ class FederationEventHandler: # XXX: this doesn't sound right? it means that we'll end up with incomplete # state. failed_to_fetch = desired_events - event_metadata.keys() + # `event_id` could be missing from `event_metadata` because it's not necessarily + # a state event. We've already checked that we've fetched it above. + failed_to_fetch.discard(event_id) if failed_to_fetch: logger.warning( "Failed to fetch missing state events for %s %s", -- cgit 1.4.1 From b4ae3b0d445a5e294d4d3278b62b2add75840424 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2022 12:06:13 +0100 Subject: Don't include appservice users when calculating push rules (#13332) This can cause a lot of extra load on servers with lots of appservice users. Introduced in #13078 --- changelog.d/13332.bugfix | 1 + synapse/push/bulk_push_rule_evaluator.py | 7 +++ tests/push/test_push_rule_evaluator.py | 85 ++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+) create mode 100644 changelog.d/13332.bugfix diff --git a/changelog.d/13332.bugfix b/changelog.d/13332.bugfix new file mode 100644 index 0000000000..826ed1788f --- /dev/null +++ b/changelog.d/13332.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. 
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index e581af9a9a..713dcf6950 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -131,6 +131,13 @@ class BulkPushRuleEvaluator: local_users = await self.store.get_local_users_in_room(event.room_id) + # Filter out appservice users. + local_users = [ + u + for u in local_users + if not self.store.get_if_app_services_interested_in_user(u) + ] + # if this event is an invite event, we may need to run rules for the user # who's been invited, otherwise they won't get told they've been invited if event.type == EventTypes.Member and event.membership == Membership.INVITE: diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 9b623d0033..718f489577 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -16,13 +16,23 @@ from typing import Dict, Optional, Set, Tuple, Union import frozendict +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions +from synapse.appservice import ApplicationService from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent +from synapse.rest.client import login, register, room +from synapse.server import HomeServer +from synapse.storage.databases.main.appservice import _make_exclusive_regex from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest +from tests.test_utils.event_injection import create_event, inject_member_event class PushRuleEvaluatorTestCase(unittest.TestCase): @@ -354,3 +364,78 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): "event_type": "*.reaction", } self.assertTrue(evaluator.matches(condition, "@user:test", "foo")) + + +class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase): + """Tests for the bulk push rule evaluator""" + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + # Define an application service so that we can register appservice users + self._service_token = "some_token" + self._service = ApplicationService( + self._service_token, + "as1", + "@as.sender:test", + namespaces={ + "users": [ + {"regex": "@_as_.*:test", "exclusive": True}, + {"regex": "@as.sender:test", "exclusive": True}, + ] + }, + msc3202_transaction_extensions=True, + ) + self.hs.get_datastores().main.services_cache = [self._service] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [self._service] + ) + + self._as_user, _ = self.register_appservice_user( + "_as_user", self._service_token + ) + + self.evaluator = self.hs.get_bulk_push_rule_evaluator() + + def test_ignore_appservice_users(self) -> None: + "Test that we don't generate push for appservice users" + + user_id = self.register_user("user", "pass") + token = self.login("user", "pass") + + room_id = self.helper.create_room_as(user_id, tok=token) + self.get_success( + inject_member_event(self.hs, room_id, self._as_user, Membership.JOIN) + ) + + event, context = self.get_success( + create_event( + self.hs, + type=EventTypes.Message, + room_id=room_id, + sender=user_id, + 
content={"body": "test", "msgtype": "m.text"}, + ) + ) + + # Assert the returned push rules do not contain the app service user + rules = self.get_success(self.evaluator._get_rules_for_event(event)) + self.assertTrue(self._as_user not in rules) + + # Assert that no push actions have been added to the staging table (the + # sender should not be pushed for the event) + users_with_push_actions = self.get_success( + self.hs.get_datastores().main.db_pool.simple_select_onecol( + table="event_push_actions_staging", + keyvalues={"event_id": event.event_id}, + retcol="user_id", + desc="test_ignore_appservice_users", + ) + ) + + self.assertEqual(len(users_with_push_actions), 0) -- cgit 1.4.1 From 93740cae5773536c77c142f84c09267c92866ead Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2022 13:37:00 +0100 Subject: 1.63.1 --- CHANGES.md | 9 +++++++++ changelog.d/13332.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/13332.bugfix diff --git a/CHANGES.md b/CHANGES.md index 7eac6131b1..143ab4e0e5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.63.1 (2022-07-20) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332)) + + Synapse 1.63.0 (2022-07-19) =========================== diff --git a/changelog.d/13332.bugfix b/changelog.d/13332.bugfix deleted file mode 100644 index 826ed1788f..0000000000 --- a/changelog.d/13332.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. diff --git a/debian/changelog b/debian/changelog index b1635bc392..9417f8714f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.63.1) stable; urgency=medium + + * New Synapse release 1.63.1. + + -- Synapse Packaging team Wed, 20 Jul 2022 13:36:52 +0100 + matrix-synapse-py3 (1.63.0) stable; urgency=medium * Clarify that homeserver server names are included in the data reported diff --git a/pyproject.toml b/pyproject.toml index 74a473bd34..9eabe15e23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.63.0" +version = "1.63.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From a1b62af2afc4a5439b7276a02f9fd981fbfd06a4 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 20 Jul 2022 11:17:26 -0700 Subject: Validate federation destinations and log an error if server name is invalid. (#13318) --- changelog.d/13318.misc | 1 + synapse/http/matrixfederationclient.py | 9 +++++++++ tests/federation/test_federation_client.py | 4 ++-- 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13318.misc diff --git a/changelog.d/13318.misc b/changelog.d/13318.misc new file mode 100644 index 0000000000..f5cd26b862 --- /dev/null +++ b/changelog.d/13318.misc @@ -0,0 +1 @@ +Validate federation destinations and log an error if a destination is invalid. 
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c63d068f74..3c35b1d2c7 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -79,6 +79,7 @@ from synapse.types import JsonDict from synapse.util import json_decoder from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred from synapse.util.metrics import Measure +from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: from synapse.server import HomeServer @@ -479,6 +480,14 @@ class MatrixFederationHttpClient: RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ + # Validate the server name and log an error if the destination is invalid; this is + # partly to help track down code paths where we haven't validated before this point + try: + parse_and_validate_server_name(request.destination) + except ValueError: + logger.exception(f"Invalid destination: {request.destination}.") + raise FederationDeniedError(request.destination) + if timeout: _sec_timeout = timeout / 1000 else: diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index d2bda07198..cf6b130e4f 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -102,7 +102,7 @@ class FederationClientTest(FederatingHomeserverTestCase): # now fire off the request state_resp, auth_resp = self.get_success( self.hs.get_federation_client().get_room_state( - "yet_another_server", + "yet.another.server", test_room_id, "event_id", RoomVersions.V9, ) @@ -112,7 +112,7 @@ class FederationClientTest(FederatingHomeserverTestCase): # check the right call got made to the agent self._mock_agent.request.assert_called_once_with( b"GET", - b"matrix://yet_another_server/_matrix/federation/v1/state/%21room_id?event_id=event_id", + b"matrix://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id", headers=mock.ANY, bodyProducer=None, ) -- cgit 1.4.1 From 0f971ca68e808dd16f53f5594a6b33b7bddcc9a9 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 20 Jul 2022 15:58:51 -0500 Subject: Update `get_pdu` to return the original, pristine `EventBase` (#13320) Update `get_pdu` to return the untouched, pristine `EventBase` as it was originally seen over federation (no metadata added). Previously, we returned the same `event` reference that we stored in the cache, which downstream code modified in place, adding metadata (such as marking it as an `outlier`) and essentially poisoning our cache. Now we always return a copy of the `event`, so the original can stay pristine in our cache and be re-used for the next cache call. Split out from https://github.com/matrix-org/synapse/pull/13205 As discussed at: - https://github.com/matrix-org/synapse/pull/13205#discussion_r918365746 - https://github.com/matrix-org/synapse/pull/13205#discussion_r918366125 Related to https://github.com/matrix-org/synapse/issues/12584. This PR doesn't fix that issue, because it hits [`get_event`, which reads from the local database, before it tries to `get_pdu`](https://github.com/matrix-org/synapse/blob/7864f33e286dec22368dc0b11c06eebb1462a51e/synapse/federation/federation_client.py#L581-L594).
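The failure mode is easy to reproduce outside Synapse. A generic sketch (not Synapse code) of why handing out the cached reference is dangerous, and why the fix returns a copy instead:

```python
import copy

_cache: dict = {"$event1": {"outlier": False}}


def get_pdu_buggy(event_id: str) -> dict:
    # Returns the cached object itself: any mutation a caller makes is
    # visible to every later cache hit.
    return _cache[event_id]


def get_pdu_fixed(event_id: str) -> dict:
    # Returns a fresh copy so the cached original stays pristine, analogous
    # to rebuilding the event via make_event_from_dict() in the real fix.
    return copy.deepcopy(_cache[event_id])


get_pdu_buggy("$event1")["outlier"] = True   # poisons the cache
assert _cache["$event1"]["outlier"] is True  # every later caller now sees True
```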
--- changelog.d/13320.misc | 1 + synapse/federation/federation_client.py | 123 ++++++++++++++++++---------- synapse/handlers/federation_event.py | 22 ++++- synapse/storage/databases/main/events.py | 23 +++++- tests/federation/test_federation_client.py | 125 ++++++++++++++++++++++++++--- 5 files changed, 233 insertions(+), 61 deletions(-) create mode 100644 changelog.d/13320.misc diff --git a/changelog.d/13320.misc b/changelog.d/13320.misc new file mode 100644 index 0000000000..d33cf3a25a --- /dev/null +++ b/changelog.d/13320.misc @@ -0,0 +1 @@ +Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7c450ecad0..842f5327c2 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -53,7 +53,7 @@ from synapse.api.room_versions import ( RoomVersion, RoomVersions, ) -from synapse.events import EventBase, builder +from synapse.events import EventBase, builder, make_event_from_dict from synapse.federation.federation_base import ( FederationBase, InvalidEventSignatureError, @@ -299,7 +299,8 @@ class FederationClient(FederationBase): moving to the next destination. None indicates no timeout. Returns: - The requested PDU, or None if we were unable to find it. + A copy of the requested PDU that is safe to modify, or None if we + were unable to find it. Raises: SynapseError, NotRetryingDestination, FederationDeniedError @@ -309,7 +310,7 @@ class FederationClient(FederationBase): ) logger.debug( - "retrieved event id %s from %s: %r", + "get_pdu_from_destination_raw: retrieved event id %s from %s: %r", event_id, destination, transaction_data, @@ -358,54 +359,92 @@ class FederationClient(FederationBase): The requested PDU, or None if we were unable to find it. """ - # TODO: Rate limit the number of times we try and get the same event. + logger.debug( + "get_pdu: event_id=%s from destinations=%s", event_id, destinations + ) - ev = self._get_pdu_cache.get(event_id) - if ev: - return ev + # TODO: Rate limit the number of times we try and get the same event. - pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) + # We might need the same event multiple times in quick succession (before + # it gets persisted to the database), so we cache the results of the lookup. + # Note that this is separate to the regular get_event cache which caches + # events once they have been persisted. 
+ event = self._get_pdu_cache.get(event_id) + + # If we don't see the event in the cache, go try to fetch it from the + # provided remote federated destinations + if not event: + pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) + + for destination in destinations: + now = self._clock.time_msec() + last_attempt = pdu_attempts.get(destination, 0) + if last_attempt + PDU_RETRY_TIME_MS > now: + logger.debug( + "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)", + destination, + last_attempt, + PDU_RETRY_TIME_MS, + now, + ) + continue + + try: + event = await self.get_pdu_from_destination_raw( + destination=destination, + event_id=event_id, + room_version=room_version, + timeout=timeout, + ) - signed_pdu = None - for destination in destinations: - now = self._clock.time_msec() - last_attempt = pdu_attempts.get(destination, 0) - if last_attempt + PDU_RETRY_TIME_MS > now: - continue + pdu_attempts[destination] = now - try: - signed_pdu = await self.get_pdu_from_destination_raw( - destination=destination, - event_id=event_id, - room_version=room_version, - timeout=timeout, - ) + if event: + # Prime the cache + self._get_pdu_cache[event.event_id] = event - pdu_attempts[destination] = now + # FIXME: We should add a `break` here to avoid calling every + # destination after we already found a PDU (will follow-up + # in a separate PR) - except SynapseError as e: - logger.info( - "Failed to get PDU %s from %s because %s", event_id, destination, e - ) - continue - except NotRetryingDestination as e: - logger.info(str(e)) - continue - except FederationDeniedError as e: - logger.info(str(e)) - continue - except Exception as e: - pdu_attempts[destination] = now + except SynapseError as e: + logger.info( + "Failed to get PDU %s from %s because %s", + event_id, + destination, + e, + ) + continue + except NotRetryingDestination as e: + logger.info(str(e)) + continue + except FederationDeniedError as e: + logger.info(str(e)) + continue + except Exception as e: + pdu_attempts[destination] = now + + logger.info( + "Failed to get PDU %s from %s because %s", + event_id, + destination, + e, + ) + continue - logger.info( - "Failed to get PDU %s from %s because %s", event_id, destination, e - ) - continue + if not event: + return None - if signed_pdu: - self._get_pdu_cache[event_id] = signed_pdu + # `event` now refers to an object stored in `get_pdu_cache`. Our + # callers may need to modify the returned object (eg to set + # `event.internal_metadata.outlier = true`), so we return a copy + # rather than the original object. + event_copy = make_event_from_dict( + event.get_pdu_json(), + event.room_version, + ) - return signed_pdu + return event_copy async def get_room_state_ids( self, destination: str, room_id: str, event_id: str diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index e4a5b64d10..a5f4ce7c8a 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -766,10 +766,24 @@ class FederationEventHandler: """ logger.info("Processing pulled event %s", event) - # these should not be outliers. 
- assert ( - not event.internal_metadata.is_outlier() - ), "pulled event unexpectedly flagged as outlier" + # This function should not be used to persist outliers (use something + # else) because this does a bunch of operations that aren't necessary + # (extra work; in particular, it makes sure we have all the prev_events + # and resolves the state across those prev events). If you happen to run + # into a situation where the event you're trying to process/backfill is + # marked as an `outlier`, then you should update that spot to return an + # `EventBase` copy that doesn't have `outlier` flag set. + # + # `EventBase` is used to represent both an event we have not yet + # persisted, and one that we have persisted and now keep in the cache. + # In an ideal world this method would only be called with the first type + # of event, but it turns out that's not actually the case and for + # example, you could get an event from cache that is marked as an + # `outlier` (fix up that spot though). + assert not event.internal_metadata.is_outlier(), ( + "Outlier event passed to _process_pulled_event. " + "To persist an event as a non-outlier, make sure to pass in a copy without `event.internal_metadata.outlier = true`." + ) event_id = event.event_id diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 156e1bd5ab..1f600f1190 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1346,9 +1346,24 @@ class PersistEventsStore: event_id: outlier for event_id, outlier in txn } + logger.debug( + "_update_outliers_txn: events=%s have_persisted=%s", + [ev.event_id for ev, _ in events_and_contexts], + have_persisted, + ) + to_remove = set() for event, context in events_and_contexts: - if event.event_id not in have_persisted: + outlier_persisted = have_persisted.get(event.event_id) + logger.debug( + "_update_outliers_txn: event=%s outlier=%s outlier_persisted=%s", + event.event_id, + event.internal_metadata.is_outlier(), + outlier_persisted, + ) + + # Ignore events which we haven't persisted at all + if outlier_persisted is None: continue to_remove.add(event) @@ -1358,7 +1373,6 @@ class PersistEventsStore: # was an outlier or not - what we have is at least as good. continue - outlier_persisted = have_persisted[event.event_id] if not event.internal_metadata.is_outlier() and outlier_persisted: # We received a copy of an event that we had already stored as # an outlier in the database. We now have some state at that event @@ -1369,7 +1383,10 @@ class PersistEventsStore: # events down /sync. In general they will be historical events, so that # doesn't matter too much, but that is not always the case. - logger.info("Updating state for ex-outlier event %s", event.event_id) + logger.info( + "_update_outliers_txn: Updating state for ex-outlier event %s", + event.event_id, + ) # insert into event_to_state_groups. 
try: diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index cf6b130e4f..50e376f695 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -22,6 +22,7 @@ from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactor from synapse.api.room_versions import RoomVersions +from synapse.events import EventBase from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -38,20 +39,24 @@ class FederationClientTest(FederatingHomeserverTestCase): self._mock_agent = mock.create_autospec(twisted.web.client.Agent, spec_set=True) homeserver.get_federation_http_client().agent = self._mock_agent - def test_get_room_state(self): - creator = f"@creator:{self.OTHER_SERVER_NAME}" - test_room_id = "!room_id" + # Move clock up to somewhat realistic time so the PDU destination retry + # works (`now` needs to be larger than `0 + PDU_RETRY_TIME_MS`). + self.reactor.advance(1000000000) + + self.creator = f"@creator:{self.OTHER_SERVER_NAME}" + self.test_room_id = "!room_id" + def test_get_room_state(self): # mock up some events to use in the response. # In real life, these would have things in `prev_events` and `auth_events`, but that's # a bit annoying to mock up, and the code under test doesn't care, so we don't bother. create_event_dict = self.add_hashes_and_signatures_from_other_server( { - "room_id": test_room_id, + "room_id": self.test_room_id, "type": "m.room.create", "state_key": "", - "sender": creator, - "content": {"creator": creator}, + "sender": self.creator, + "content": {"creator": self.creator}, "prev_events": [], "auth_events": [], "origin_server_ts": 500, @@ -59,10 +64,10 @@ class FederationClientTest(FederatingHomeserverTestCase): ) member_event_dict = self.add_hashes_and_signatures_from_other_server( { - "room_id": test_room_id, + "room_id": self.test_room_id, "type": "m.room.member", - "sender": creator, - "state_key": creator, + "sender": self.creator, + "state_key": self.creator, "content": {"membership": "join"}, "prev_events": [], "auth_events": [], @@ -71,9 +76,9 @@ class FederationClientTest(FederatingHomeserverTestCase): ) pl_event_dict = self.add_hashes_and_signatures_from_other_server( { - "room_id": test_room_id, + "room_id": self.test_room_id, "type": "m.room.power_levels", - "sender": creator, + "sender": self.creator, "state_key": "", "content": {}, "prev_events": [], @@ -103,7 +108,7 @@ class FederationClientTest(FederatingHomeserverTestCase): state_resp, auth_resp = self.get_success( self.hs.get_federation_client().get_room_state( "yet.another.server", - test_room_id, + self.test_room_id, "event_id", RoomVersions.V9, ) @@ -130,6 +135,102 @@ class FederationClientTest(FederatingHomeserverTestCase): ["m.room.create", "m.room.member", "m.room.power_levels"], ) + def test_get_pdu_returns_nothing_when_event_does_not_exist(self): + """No event should be returned when the event does not exist""" + remote_pdu = self.get_success( + self.hs.get_federation_client().get_pdu( + ["yet.another.server"], + "event_should_not_exist", + RoomVersions.V9, + ) + ) + self.assertEqual(remote_pdu, None) + + def test_get_pdu(self): + """Test to make sure an event is returned by `get_pdu()`""" + self._get_pdu_once() + + def test_get_pdu_event_from_cache_is_pristine(self): + """Test that modifications made to events returned by `get_pdu()` + do not propagate back to the internal cache (events returned should + be a copy).
+ """ + + # Get the PDU in the cache + remote_pdu = self._get_pdu_once() + + # Modify the the event reference. + # This change should not make it back to the `_get_pdu_cache`. + remote_pdu.internal_metadata.outlier = True + + # Get the event again. This time it should read it from cache. + remote_pdu2 = self.get_success( + self.hs.get_federation_client().get_pdu( + ["yet.another.server"], + remote_pdu.event_id, + RoomVersions.V9, + ) + ) + + # Sanity check that we are working against the same event + self.assertEqual(remote_pdu.event_id, remote_pdu2.event_id) + + # Make sure the event does not include modification from earlier + self.assertIsNotNone(remote_pdu2) + self.assertEqual(remote_pdu2.internal_metadata.outlier, False) + + def _get_pdu_once(self) -> EventBase: + """Retrieve an event via `get_pdu()` and assert that an event was returned. + Also used to prime the cache for subsequent test logic. + """ + message_event_dict = self.add_hashes_and_signatures_from_other_server( + { + "room_id": self.test_room_id, + "type": "m.room.message", + "sender": self.creator, + "state_key": "", + "content": {}, + "prev_events": [], + "auth_events": [], + "origin_server_ts": 700, + "depth": 10, + } + ) + + # mock up the response, and have the agent return it + self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed( + _mock_response( + { + "origin": "yet.another.server", + "origin_server_ts": 900, + "pdus": [ + message_event_dict, + ], + } + ) + ) + + remote_pdu = self.get_success( + self.hs.get_federation_client().get_pdu( + ["yet.another.server"], + "event_id", + RoomVersions.V9, + ) + ) + + # check the right call got made to the agent + self._mock_agent.request.assert_called_once_with( + b"GET", + b"matrix://yet.another.server/_matrix/federation/v1/event/event_id", + headers=mock.ANY, + bodyProducer=None, + ) + + self.assertIsNotNone(remote_pdu) + self.assertEqual(remote_pdu.internal_metadata.outlier, False) + + return remote_pdu + def _mock_response(resp: JsonDict): body = json.dumps(resp).encode("utf-8") -- cgit 1.4.1 From b909d5327b06af20505a8c02e34765625f215bfe Mon Sep 17 00:00:00 2001 From: David Teller Date: Wed, 20 Jul 2022 11:04:54 +0200 Subject: Document `rc_invites.per_issuer`, added in v1.63. Resolves #13330. Missed in #13125. Signed-off-by: David Teller --- changelog.d/13333.doc | 1 + docs/usage/configuration/config_documentation.md | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/13333.doc diff --git a/changelog.d/13333.doc b/changelog.d/13333.doc new file mode 100644 index 0000000000..57cbdf05c8 --- /dev/null +++ b/changelog.d/13333.doc @@ -0,0 +1 @@ +Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 6d7d700a13..601fdeb09e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1504,6 +1504,8 @@ The `rc_invites.per_user` limit applies to the *receiver* of the invite, rather sender, meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a single user cannot *receive* more than a burst of 5 invites at a time. +In contrast, the `rc_invites.per_issuer` limit applies to the *issuer* of the invite, meaning that a `rc_invite.per_issuer.burst_count` of 5 mandates that single user cannot *send* more than a burst of 5 invites at a time. 
+ Example configuration: ```yaml rc_invites: @@ -1513,7 +1515,13 @@ rc_invites: per_user: per_second: 0.004 burst_count: 3 + per_issuer: + per_second: 0.5 + burst_count: 5 ``` + +_Changed in version 1.63:_ added the `per_issuer` limit. + --- ### `rc_third_party_invite` -- cgit 1.4.1 From 190f49d8aba3b18bb9b9c2cd8352dc9b402d6bbf Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Thu, 21 Jul 2022 12:51:30 +0200 Subject: Use cache store remove base slaved (#13329) This comes from two identical definitions in each of the base stores, and means the base slaved store is now empty and can be removed. --- changelog.d/13329.misc | 1 + synapse/app/admin_cmd.py | 2 - synapse/app/generic_worker.py | 2 - synapse/replication/slave/storage/_base.py | 58 ----------------------- synapse/replication/slave/storage/account_data.py | 3 +- synapse/replication/slave/storage/deviceinbox.py | 3 +- synapse/replication/slave/storage/devices.py | 3 +- synapse/replication/slave/storage/directory.py | 4 +- synapse/replication/slave/storage/events.py | 3 -- synapse/replication/slave/storage/filtering.py | 5 +- synapse/replication/slave/storage/profile.py | 3 +- synapse/replication/slave/storage/pushers.py | 3 +- synapse/replication/slave/storage/receipts.py | 4 +- synapse/replication/slave/storage/registration.py | 4 +- synapse/storage/databases/main/__init__.py | 29 +----------- synapse/storage/databases/main/cache.py | 26 ++++++++++ 16 files changed, 39 insertions(+), 114 deletions(-) create mode 100644 changelog.d/13329.misc delete mode 100644 synapse/replication/slave/storage/_base.py diff --git a/changelog.d/13329.misc b/changelog.d/13329.misc new file mode 100644 index 0000000000..4df9a9f6d7 --- /dev/null +++ b/changelog.d/13329.misc @@ -0,0 +1 @@ +Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). 
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 87f82bd9a5..53ec33bcd1 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -28,7 +28,6 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.events import EventBase from synapse.handlers.admin import ExfiltrationWriter -from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore @@ -58,7 +57,6 @@ class AdminCmdSlavedStore( SlavedDeviceStore, SlavedPushRuleStore, SlavedEventStore, - BaseSlavedStore, RoomWorkerStore, ): def __init__( diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 4a987fb759..0c16584abc 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -48,7 +48,6 @@ from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore @@ -251,7 +250,6 @@ class GenericWorkerSlavedStore( TransactionWorkerStore, LockStore, SessionStore, - BaseSlavedStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py deleted file mode 100644 index 7644146dba..0000000000 --- a/synapse/replication/slave/storage/_base.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from typing import TYPE_CHECKING, Optional - -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore -from synapse.storage.engines import PostgresEngine -from synapse.storage.util.id_generators import MultiWriterIdGenerator - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class BaseSlavedStore(CacheInvalidationWorkerStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - if isinstance(self.database_engine, PostgresEngine): - self._cache_id_gen: Optional[ - MultiWriterIdGenerator - ] = MultiWriterIdGenerator( - db_conn, - database, - stream_name="caches", - instance_name=hs.get_instance_name(), - tables=[ - ( - "cache_invalidation_stream_by_instance", - "instance_name", - "stream_id", - ) - ], - sequence_name="cache_invalidation_stream_seq", - writers=[], - ) - else: - self._cache_id_gen = None - - self.hs = hs diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index ee74ee7d85..57d3237981 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -13,10 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.storage.databases.main.account_data import AccountDataWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore -class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlavedStore): +class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore): pass diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index e940751084..df9e4d8f45 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore -class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore): +class SlavedDeviceInboxStore(DeviceInboxWorkerStore): pass diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index a48cc02069..6fcade510a 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -14,7 +14,6 @@ from typing import TYPE_CHECKING, Any, Iterable -from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage.database import DatabasePool, LoggingDatabaseConnection @@ -24,7 +23,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer -class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): +class SlavedDeviceStore(DeviceWorkerStore): def __init__( self, database: DatabasePool, diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py index 71fde0c96c..ca716df3df 100644 --- a/synapse/replication/slave/storage/directory.py +++ b/synapse/replication/slave/storage/directory.py @@ -14,8 +14,6 @@ from synapse.storage.databases.main.directory import DirectoryWorkerStore -from ._base import BaseSlavedStore - -class DirectoryStore(DirectoryWorkerStore, BaseSlavedStore): +class DirectoryStore(DirectoryWorkerStore): pass diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index a72dad7464..fe47778cb1 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -29,8 +29,6 @@ from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore - if TYPE_CHECKING: from synapse.server import HomeServer @@ -56,7 +54,6 @@ class SlavedEventStore( EventsWorkerStore, UserErasureWorkerStore, RelationsWorkerStore, - BaseSlavedStore, ): def __init__( self, diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 4d185e2b56..c52679cd60 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -14,16 +14,15 @@ from typing import TYPE_CHECKING +from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.filtering import FilteringStore -from ._base import BaseSlavedStore - if TYPE_CHECKING: from synapse.server import HomeServer -class SlavedFilteringStore(BaseSlavedStore): +class SlavedFilteringStore(SQLBaseStore): def __init__( self, database: DatabasePool, diff --git a/synapse/replication/slave/storage/profile.py b/synapse/replication/slave/storage/profile.py index 99f4a22642..a774a2ff48 100644 --- a/synapse/replication/slave/storage/profile.py +++ b/synapse/replication/slave/storage/profile.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.storage.databases.main.profile import ProfileWorkerStore -class SlavedProfileStore(ProfileWorkerStore, BaseSlavedStore): +class SlavedProfileStore(ProfileWorkerStore): pass diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index de642bba71..44ed20e424 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -18,14 +18,13 @@ from synapse.replication.tcp.streams import PushersStream from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.pusher import PusherWorkerStore -from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker if TYPE_CHECKING: from synapse.server import HomeServer -class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): +class SlavedPusherStore(PusherWorkerStore): def __init__( self, database: DatabasePool, diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py index 3826b87dec..407862a2b2 100644 --- a/synapse/replication/slave/storage/receipts.py +++ b/synapse/replication/slave/storage/receipts.py @@ -15,8 +15,6 @@ from synapse.storage.databases.main.receipts import ReceiptsWorkerStore -from ._base import BaseSlavedStore - -class SlavedReceiptsStore(ReceiptsWorkerStore, BaseSlavedStore): +class SlavedReceiptsStore(ReceiptsWorkerStore): pass diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py index 5dae35a960..52c593e59d 100644 --- a/synapse/replication/slave/storage/registration.py +++ b/synapse/replication/slave/storage/registration.py @@ -14,8 +14,6 @@ from synapse.storage.databases.main.registration import RegistrationWorkerStore -from ._base import BaseSlavedStore - -class SlavedRegistrationStore(RegistrationWorkerStore, BaseSlavedStore): +class SlavedRegistrationStore(RegistrationWorkerStore): pass diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index a3d31d3737..4dccbb732a 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -24,9 +24,9 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.databases.main.stats import UserSortOrder -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor -from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator +from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -149,31 +149,6 @@ class DataStore( ], ) - self._cache_id_gen: Optional[MultiWriterIdGenerator] - if isinstance(self.database_engine, PostgresEngine): - # We set the `writers` to an empty list here as we don't care about - # missing updates over restarts, as we'll not have anything in our - # caches to invalidate. (This reduces the amount of writes to the DB - # that happen). 
- self._cache_id_gen = MultiWriterIdGenerator( - db_conn, - database, - stream_name="caches", - instance_name=hs.get_instance_name(), - tables=[ - ( - "cache_invalidation_stream_by_instance", - "instance_name", - "stream_id", - ) - ], - sequence_name="cache_invalidation_stream_seq", - writers=[], - ) - - else: - self._cache_id_gen = None - super().__init__(database, db_conn, hs) events_max = self._stream_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 2367ddeea3..12e9a42382 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -32,6 +32,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.util.caches.descriptors import _CachedFunction from synapse.util.iterutils import batch_iter @@ -65,6 +66,31 @@ class CacheInvalidationWorkerStore(SQLBaseStore): psql_only=True, # The table is only on postgres DBs. ) + self._cache_id_gen: Optional[MultiWriterIdGenerator] + if isinstance(self.database_engine, PostgresEngine): + # We set the `writers` to an empty list here as we don't care about + # missing updates over restarts, as we'll not have anything in our + # caches to invalidate. (This reduces the amount of writes to the DB + # that happen). + self._cache_id_gen = MultiWriterIdGenerator( + db_conn, + database, + stream_name="caches", + instance_name=hs.get_instance_name(), + tables=[ + ( + "cache_invalidation_stream_by_instance", + "instance_name", + "stream_id", + ) + ], + sequence_name="cache_invalidation_stream_seq", + writers=[], + ) + + else: + self._cache_id_gen = None + async def get_all_updated_caches( self, instance_name: str, last_id: int, current_id: int, limit: int ) -> Tuple[List[Tuple[int, tuple]], int, bool]: -- cgit 1.4.1 From 50122754c8743df5c904e81b634fdfdeea64e795 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 21 Jul 2022 08:01:52 -0400 Subject: Add missing types to opentracing. (#13345) After this change `synapse.logging` is fully typed. --- changelog.d/13328.misc | 2 +- changelog.d/13345.misc | 1 + mypy.ini | 3 -- synapse/federation/transport/server/_base.py | 2 +- synapse/handlers/device.py | 8 ++--- synapse/handlers/e2e_keys.py | 16 ++++----- synapse/handlers/e2e_room_keys.py | 4 +-- synapse/logging/opentracing.py | 44 ++++++++++++++++++----- synapse/metrics/background_process_metrics.py | 2 +- synapse/rest/client/keys.py | 4 ++- synapse/storage/databases/main/deviceinbox.py | 2 +- synapse/storage/databases/main/devices.py | 4 +-- synapse/storage/databases/main/end_to_end_keys.py | 6 ++-- tests/logging/test_opentracing.py | 30 +++++++++++----- 14 files changed, 83 insertions(+), 45 deletions(-) create mode 100644 changelog.d/13345.misc diff --git a/changelog.d/13328.misc b/changelog.d/13328.misc index d15fb5fc37..c80578ce95 100644 --- a/changelog.d/13328.misc +++ b/changelog.d/13328.misc @@ -1 +1 @@ -Add type hints to `trace` decorator. +Add missing type hints to open tracing module. diff --git a/changelog.d/13345.misc b/changelog.d/13345.misc new file mode 100644 index 0000000000..c80578ce95 --- /dev/null +++ b/changelog.d/13345.misc @@ -0,0 +1 @@ +Add missing type hints to open tracing module. 
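The most involved typing change below is `ensure_active_span`, a decorator factory whose wrapper may return a caller-supplied fallback instead of calling the wrapped function. The pattern is sketched here in isolation (simplified: `ENABLED` stands in for the real check that opentracing is configured); `@overload` lets mypy distinguish the no-fallback case from a fallback of type `T`:

```python
# Simplified sketch of the overload pattern used in opentracing.py below;
# ENABLED is a stand-in for "is the traced subsystem configured?".
from functools import wraps
from typing import Callable, Optional, TypeVar, Union, overload

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")
T = TypeVar("T")

ENABLED = False


@overload
def with_fallback(
    ret: None = None,
) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]:
    ...


@overload
def with_fallback(ret: T) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]:
    ...


def with_fallback(
    ret: Optional[T] = None,
) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]:
    def decorator(func: Callable[P, R]) -> Callable[P, Union[Optional[T], R]]:
        @wraps(func)
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Union[Optional[T], R]:
            if not ENABLED:
                # Skip the real work and hand back the fallback value.
                return ret
            return func(*args, **kwargs)

        return wrapper

    return decorator
```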
diff --git a/mypy.ini b/mypy.ini index ea0ab003a8..6add272990 100644 --- a/mypy.ini +++ b/mypy.ini @@ -84,9 +84,6 @@ disallow_untyped_defs = False [mypy-synapse.http.matrixfederationclient] disallow_untyped_defs = False -[mypy-synapse.logging.opentracing] -disallow_untyped_defs = False - [mypy-synapse.metrics._reactor_metrics] disallow_untyped_defs = False # This module imports select.epoll. That exists on Linux, but doesn't on macOS. diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 84100a5a52..bb0f8d6b7b 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -309,7 +309,7 @@ class BaseFederationServlet: raise # update the active opentracing span with the authenticated entity - set_tag("authenticated_entity", origin) + set_tag("authenticated_entity", str(origin)) # if the origin is authenticated and whitelisted, use its span context # as the parent. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index c05a170c55..1a8379854c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -118,8 +118,8 @@ class DeviceWorkerHandler: ips = await self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) - set_tag("device", device) - set_tag("ips", ips) + set_tag("device", str(device)) + set_tag("ips", str(ips)) return device @@ -170,7 +170,7 @@ class DeviceWorkerHandler: """ set_tag("user_id", user_id) - set_tag("from_token", from_token) + set_tag("from_token", str(from_token)) now_room_key = self.store.get_room_max_token() room_ids = await self.store.get_rooms_for_user(user_id) @@ -795,7 +795,7 @@ class DeviceListUpdater: """ set_tag("origin", origin) - set_tag("edu_content", edu_content) + set_tag("edu_content", str(edu_content)) user_id = edu_content.pop("user_id") device_id = edu_content.pop("device_id") stream_id = str(edu_content.pop("stream_id")) # They may come as ints diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 84c28c480e..c938339ddd 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -138,8 +138,8 @@ class E2eKeysHandler: else: remote_queries[user_id] = device_ids - set_tag("local_key_query", local_query) - set_tag("remote_key_query", remote_queries) + set_tag("local_key_query", str(local_query)) + set_tag("remote_key_query", str(remote_queries)) # First get local devices. # A map of destination -> failure response. 
@@ -343,7 +343,7 @@ class E2eKeysHandler: failure = _exception_to_failure(e) failures[destination] = failure set_tag("error", True) - set_tag("reason", failure) + set_tag("reason", str(failure)) return @@ -405,7 +405,7 @@ class E2eKeysHandler: Returns: A map from user_id -> device_id -> device details """ - set_tag("local_query", query) + set_tag("local_query", str(query)) local_query: List[Tuple[str, Optional[str]]] = [] result_dict: Dict[str, Dict[str, dict]] = {} @@ -477,8 +477,8 @@ class E2eKeysHandler: domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = one_time_keys - set_tag("local_key_query", local_query) - set_tag("remote_key_query", remote_queries) + set_tag("local_key_query", str(local_query)) + set_tag("remote_key_query", str(remote_queries)) results = await self.store.claim_e2e_one_time_keys(local_query) @@ -508,7 +508,7 @@ class E2eKeysHandler: failure = _exception_to_failure(e) failures[destination] = failure set_tag("error", True) - set_tag("reason", failure) + set_tag("reason", str(failure)) await make_deferred_yieldable( defer.gatherResults( @@ -611,7 +611,7 @@ class E2eKeysHandler: result = await self.store.count_e2e_one_time_keys(user_id, device_id) - set_tag("one_time_key_counts", result) + set_tag("one_time_key_counts", str(result)) return {"one_time_key_counts": result} async def _upload_one_time_keys_for_user( diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 446f509bdc..28dc08c22a 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, Optional +from typing import TYPE_CHECKING, Dict, Optional, cast from typing_extensions import Literal @@ -97,7 +97,7 @@ class E2eRoomKeysHandler: user_id, version, room_id, session_id ) - log_kv(results) + log_kv(cast(JsonDict, results)) return results @trace diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 17e729f0c7..ad5cbf46a4 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -182,6 +182,8 @@ from typing import ( Type, TypeVar, Union, + cast, + overload, ) import attr @@ -328,6 +330,7 @@ class _Sentinel(enum.Enum): P = ParamSpec("P") R = TypeVar("R") +T = TypeVar("T") def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: @@ -343,22 +346,43 @@ def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: return _only_if_tracing_inner -def ensure_active_span(message: str, ret=None): +@overload +def ensure_active_span( + message: str, +) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]: + ... + + +@overload +def ensure_active_span( + message: str, ret: T +) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]: + ... + + +def ensure_active_span( + message: str, ret: Optional[T] = None +) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]: """Executes the operation only if opentracing is enabled and there is an active span. If there is no active span it logs message at the error level. Args: message: Message which fills in "There was no active span when trying to %s" in the error log if there is no active span and opentracing is enabled. - ret (object): return value if opentracing is None or there is no active span. + ret: return value if opentracing is None or there is no active span. 
- Returns (object): The result of the func or ret if opentracing is disabled or there + Returns: + The result of the func, falling back to ret if opentracing is disabled or there was no active span. """ - def ensure_active_span_inner_1(func): + def ensure_active_span_inner_1( + func: Callable[P, R] + ) -> Callable[P, Union[Optional[T], R]]: @wraps(func) - def ensure_active_span_inner_2(*args, **kwargs): + def ensure_active_span_inner_2( + *args: P.args, **kwargs: P.kwargs + ) -> Union[Optional[T], R]: if not opentracing: return ret @@ -464,7 +488,7 @@ def start_active_span( finish_on_close: bool = True, *, tracer: Optional["opentracing.Tracer"] = None, -): +) -> "opentracing.Scope": """Starts an active opentracing span. Records the start time for the span, and sets it as the "active span" in the @@ -502,7 +526,7 @@ def start_active_span_follows_from( *, inherit_force_tracing: bool = False, tracer: Optional["opentracing.Tracer"] = None, -): +) -> "opentracing.Scope": """Starts an active opentracing span, with additional references to previous spans Args: @@ -717,7 +741,9 @@ def inject_response_headers(response_headers: Headers) -> None: response_headers.addRawHeader("Synapse-Trace-Id", f"{trace_id:x}") -@ensure_active_span("get the active span context as a dict", ret={}) +@ensure_active_span( + "get the active span context as a dict", ret=cast(Dict[str, str], {}) +) def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]: """ Gets a span context as a dict. This can be used instead of manually @@ -886,7 +912,7 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: for i, arg in enumerate(argspec.args[1:]): set_tag("ARG_" + arg, args[i]) # type: ignore[index] set_tag("args", args[len(argspec.args) :]) # type: ignore[index] - set_tag("kwargs", kwargs) + set_tag("kwargs", str(kwargs)) return func(*args, **kwargs) return _tag_args_inner diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index eef3462e10..7a1516d3a8 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -235,7 +235,7 @@ def run_as_background_process( f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)} ) else: - ctx = nullcontext() + ctx = nullcontext() # type: ignore[assignment] with ctx: return await func(*args, **kwargs) except Exception: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index eb1b85721f..e3f454896a 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -208,7 +208,9 @@ class KeyChangesServlet(RestServlet): # We want to enforce they do pass us one, but we ignore it and return # changes after the "to" as well as before. - set_tag("to", parse_string(request, "to")) + # + # XXX This does not enforce that "to" is passed. 
+ set_tag("to", str(parse_string(request, "to"))) from_token = await StreamToken.from_string(self.store, from_token_string) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 422e0e65ca..73c95ffb6f 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -436,7 +436,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): (user_id, device_id), None ) - set_tag("last_deleted_stream_id", last_deleted_stream_id) + set_tag("last_deleted_stream_id", str(last_deleted_stream_id)) if last_deleted_stream_id: has_changed = self._device_inbox_stream_cache.has_entity_changed( diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 7a6ed332aa..ca0fe8c4be 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -706,8 +706,8 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): else: results[user_id] = await self.get_cached_devices_for_user(user_id) - set_tag("in_cache", results) - set_tag("not_in_cache", user_ids_not_in_cache) + set_tag("in_cache", str(results)) + set_tag("not_in_cache", str(user_ids_not_in_cache)) return user_ids_not_in_cache, results diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 60f622ad71..46c0d06157 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -146,7 +146,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker key data. The key data will be a dict in the same format as the DeviceKeys type returned by POST /_matrix/client/r0/keys/query. """ - set_tag("query_list", query_list) + set_tag("query_list", str(query_list)) if not query_list: return {} @@ -418,7 +418,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker def _add_e2e_one_time_keys(txn: LoggingTransaction) -> None: set_tag("user_id", user_id) set_tag("device_id", device_id) - set_tag("new_keys", new_keys) + set_tag("new_keys", str(new_keys)) # We are protected from race between lookup and insertion due to # a unique constraint. If there is a race of two calls to # `add_e2e_one_time_keys` then they'll conflict and we will only @@ -1161,7 +1161,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): set_tag("user_id", user_id) set_tag("device_id", device_id) set_tag("time_now", time_now) - set_tag("device_keys", device_keys) + set_tag("device_keys", str(device_keys)) old_key_json = self.db_pool.simple_select_one_onecol_txn( txn, diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 40148d503c..3b14c76d7e 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import cast + from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactorClock @@ -40,6 +42,15 @@ from tests.unittest import TestCase class LogContextScopeManagerTestCase(TestCase): + """ + Test logging contexts and active opentracing spans. + + There's casts throughout this from generic opentracing objects (e.g. + opentracing.Span) to the ones specific to Jaeger since they have additional + properties that these tests depend on. This is safe since the only supported + opentracing backend is Jaeger. 
+ """ + if LogContextScopeManager is None: skip = "Requires opentracing" # type: ignore[unreachable] if jaeger_client is None: @@ -69,7 +80,7 @@ class LogContextScopeManagerTestCase(TestCase): # start_active_span should start and activate a span. scope = start_active_span("span", tracer=self._tracer) - span = scope.span + span = cast(jaeger_client.Span, scope.span) self.assertEqual(self._tracer.active_span, span) self.assertIsNotNone(span.start_time) @@ -91,6 +102,7 @@ class LogContextScopeManagerTestCase(TestCase): with LoggingContext("root context"): with start_active_span("root span", tracer=self._tracer) as root_scope: self.assertEqual(self._tracer.active_span, root_scope.span) + root_context = cast(jaeger_client.SpanContext, root_scope.span.context) scope1 = start_active_span( "child1", @@ -99,9 +111,8 @@ class LogContextScopeManagerTestCase(TestCase): self.assertEqual( self._tracer.active_span, scope1.span, "child1 was not activated" ) - self.assertEqual( - scope1.span.context.parent_id, root_scope.span.context.span_id - ) + context1 = cast(jaeger_client.SpanContext, scope1.span.context) + self.assertEqual(context1.parent_id, root_context.span_id) scope2 = start_active_span_follows_from( "child2", @@ -109,17 +120,18 @@ class LogContextScopeManagerTestCase(TestCase): tracer=self._tracer, ) self.assertEqual(self._tracer.active_span, scope2.span) - self.assertEqual( - scope2.span.context.parent_id, scope1.span.context.span_id - ) + context2 = cast(jaeger_client.SpanContext, scope2.span.context) + self.assertEqual(context2.parent_id, context1.span_id) with scope1, scope2: pass # the root scope should be restored self.assertEqual(self._tracer.active_span, root_scope.span) - self.assertIsNotNone(scope2.span.end_time) - self.assertIsNotNone(scope1.span.end_time) + span2 = cast(jaeger_client.Span, scope2.span) + span1 = cast(jaeger_client.Span, scope1.span) + self.assertIsNotNone(span2.end_time) + self.assertIsNotNone(span1.end_time) self.assertIsNone(self._tracer.active_span) -- cgit 1.4.1 From 34949ead1f1f290710441d40187f7a35534ec1b2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 21 Jul 2022 13:23:05 +0100 Subject: Track DB txn times w/ two counters, not histogram (#13342) --- changelog.d/13342.misc | 1 + synapse/storage/database.py | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13342.misc diff --git a/changelog.d/13342.misc b/changelog.d/13342.misc new file mode 100644 index 0000000000..ce9c816b9c --- /dev/null +++ b/changelog.d/13342.misc @@ -0,0 +1 @@ +When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. 
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index ea672ff89e..b394a6658b 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -39,7 +39,7 @@ from typing import ( ) import attr -from prometheus_client import Histogram +from prometheus_client import Counter, Histogram from typing_extensions import Concatenate, Literal, ParamSpec from twisted.enterprise import adbapi @@ -76,7 +76,8 @@ perf_logger = logging.getLogger("synapse.storage.TIME") sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "sec") sql_query_timer = Histogram("synapse_storage_query_time", "sec", ["verb"]) -sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"]) +sql_txn_count = Counter("synapse_storage_transaction_time_count", "sec", ["desc"]) +sql_txn_duration = Counter("synapse_storage_transaction_time_sum", "sec", ["desc"]) # Unique indexes which have been added in background updates. Maps from table name @@ -795,7 +796,8 @@ class DatabasePool: self._current_txn_total_time += duration self._txn_perf_counters.update(desc, duration) - sql_txn_timer.labels(desc).observe(duration) + sql_txn_count.labels(desc).inc(1) + sql_txn_duration.labels(desc).inc(duration) async def runInteraction( self, -- cgit 1.4.1 From 10e40938398b6a462801be7eab9f5e6260649a0a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 21 Jul 2022 14:29:58 +0200 Subject: Call out buildkit is required when building test docker images (#13338) Co-authored-by: David Robertson --- changelog.d/13338.doc | 1 + docker/Dockerfile-workers | 1 + docker/README-testing.md | 4 ++++ docker/complement/Dockerfile | 1 + 4 files changed, 7 insertions(+) create mode 100644 changelog.d/13338.doc diff --git a/changelog.d/13338.doc b/changelog.d/13338.doc new file mode 100644 index 0000000000..7acf6d3f34 --- /dev/null +++ b/changelog.d/13338.doc @@ -0,0 +1 @@ +Mention that BuildKit is needed when building Docker images for tests. diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 0f1570cfb6..84f836ff7b 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # Inherit from the official Synapse docker image ARG SYNAPSE_VERSION=latest FROM matrixdotorg/synapse:$SYNAPSE_VERSION diff --git a/docker/README-testing.md b/docker/README-testing.md index 1f0423f09b..21b99963d8 100644 --- a/docker/README-testing.md +++ b/docker/README-testing.md @@ -22,6 +22,10 @@ Consult the [contributing guide][guideComplementSh] for instructions on how to u Under some circumstances, you may wish to build the images manually. The instructions below will lead you to doing that. +Note that these images can only be built using [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/), +therefore BuildKit needs to be enabled when calling `docker build`. This can be done by +setting `DOCKER_BUILDKIT=1` in your environment. + Start by building the base Synapse docker image. If you wish to run tests with the latest release of Synapse, instead of your current checkout, you can skip this step. 
From the root of the repository: diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index c5e7984a28..3cfff19f9a 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse # by including a built-in postgres instance, as well as setting up the homeserver so # that it is ready for testing via Complement. -- cgit 1.4.1 From 13341dde5a4854588ec89b832aed256524abff73 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2022 16:02:02 +0100 Subject: Don't hold onto full state in state cache (#13324) --- changelog.d/13324.misc | 1 + synapse/state/__init__.py | 68 ++++++++++++++++++++++++++++++++++++----------- 2 files changed, 54 insertions(+), 15 deletions(-) create mode 100644 changelog.d/13324.misc diff --git a/changelog.d/13324.misc b/changelog.d/13324.misc new file mode 100644 index 0000000000..30670cf56c --- /dev/null +++ b/changelog.d/13324.misc @@ -0,0 +1 @@ +Reduce the amount of state we store in the `state_cache`. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index e3faa52cd6..87ccd52f0a 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -14,7 +14,7 @@ # limitations under the License. import heapq import logging -from collections import defaultdict +from collections import ChainMap, defaultdict from typing import ( TYPE_CHECKING, Any, @@ -92,8 +92,11 @@ class _StateCacheEntry: prev_group: Optional[int] = None, delta_ids: Optional[StateMap[str]] = None, ): - if state is None and state_group is None: - raise Exception("Either state or state group must be not None") + if state is None and state_group is None and prev_group is None: + raise Exception("One of state, state_group or prev_group must be not None") + + if prev_group is not None and delta_ids is None: + raise Exception("If prev_group is set so must delta_ids") # A map from (type, state_key) to event_id. # @@ -120,18 +123,48 @@ class _StateCacheEntry: if self._state is not None: return self._state - assert self.state_group is not None + if self.state_group is not None: + return await state_storage.get_state_ids_for_group( + self.state_group, state_filter + ) + + assert self.prev_group is not None and self.delta_ids is not None - return await state_storage.get_state_ids_for_group( - self.state_group, state_filter + prev_state = await state_storage.get_state_ids_for_group( + self.prev_group, state_filter ) + # ChainMap expects MutableMapping, but since we're using it immutably + # it's safe to give it immutable maps. + return ChainMap(self.delta_ids, prev_state) # type: ignore[arg-type] + + def set_state_group(self, state_group: int) -> None: + """Update the state group assigned to this state (e.g. after we've + persisted it). + + Note: this will cause the cache entry to drop any stored state. + """ + + self.state_group = state_group + + # We clear out the state as we no longer need to explicitly keep it in + # the `state_cache` (as the store state group cache will do that). + self._state = None + def __len__(self) -> int: - # The len should is used to estimate how large this cache entry is, for - # cache eviction purposes. This is why if `self.state` is None it's fine - # to return 1. + # The len should be used to estimate how large this cache entry is, for - # cache eviction purposes. This is why it's fine to return 1 if we're + # not storing any state.
+ + length = 0 - return len(self._state) if self._state else 1 + if self._state: + length += len(self._state) + + if self.delta_ids: + length += len(self.delta_ids) + + return length or 1 # Make sure it's not 0. class StateHandler: @@ -320,7 +353,7 @@ class StateHandler: current_state_ids=state_ids_before_event, ) ) - entry.state_group = state_group_before_event + entry.set_state_group(state_group_before_event) else: state_group_before_event = entry.state_group @@ -747,7 +780,7 @@ def _make_state_cache_entry( old_state_event_ids = set(state.values()) if new_state_event_ids == old_state_event_ids: # got an exact match. - return _StateCacheEntry(state=new_state, state_group=sg) + return _StateCacheEntry(state=None, state_group=sg) # TODO: We want to create a state group for this set of events, to # increase cache hits, but we need to make sure that it doesn't @@ -769,9 +802,14 @@ def _make_state_cache_entry( prev_group = old_group delta_ids = n_delta_ids - return _StateCacheEntry( - state=new_state, state_group=None, prev_group=prev_group, delta_ids=delta_ids - ) + if prev_group is not None: + # If we have a prev group and deltas then we can drop the new state from + # the cache (to reduce memory usage). + return _StateCacheEntry( + state=None, state_group=None, prev_group=prev_group, delta_ids=delta_ids + ) + else: + return _StateCacheEntry(state=new_state, state_group=None) @attr.s(slots=True, auto_attribs=True) -- cgit 1.4.1 From 0b87eb8e0c8e2dd4a426005dce53dfdd57282475 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2022 17:13:44 +0100 Subject: Make DictionaryCache have better expiry properties (#13292) --- changelog.d/13292.misc | 1 + synapse/storage/databases/state/store.py | 9 +- synapse/util/caches/dictionary_cache.py | 218 ++++++++++++++++++++++++++----- synapse/util/caches/lrucache.py | 90 ++++++++++++- synapse/util/caches/treecache.py | 38 ++++++ tests/storage/test_state.py | 10 +- tests/util/test_dict_cache.py | 35 ++++- 7 files changed, 358 insertions(+), 43 deletions(-) create mode 100644 changelog.d/13292.misc diff --git a/changelog.d/13292.misc b/changelog.d/13292.misc new file mode 100644 index 0000000000..67fec55330 --- /dev/null +++ b/changelog.d/13292.misc @@ -0,0 +1 @@ +Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index afbc85ad0c..bb64543c1f 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -202,7 +202,14 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): requests state from the cache, if False we need to query the DB for the missing state. """ - cache_entry = cache.get(group) + # If we are asked explicitly for a subset of keys, we only ask for those + # from the cache. This ensures that the `DictionaryCache` can make + # better decisions about what to cache and what to expire.
+ dict_keys = None + if not state_filter.has_wildcards(): + dict_keys = state_filter.concrete_types() + + cache_entry = cache.get(group, dict_keys=dict_keys) state_dict_ids = cache_entry.value if cache_entry.full or state_filter.is_full(): diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index d267703df0..fa91479c97 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -14,11 +14,13 @@ import enum import logging import threading -from typing import Any, Dict, Generic, Iterable, Optional, Set, TypeVar +from typing import Any, Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union import attr +from typing_extensions import Literal from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.treecache import TreeCache logger = logging.getLogger(__name__) @@ -33,10 +35,12 @@ DV = TypeVar("DV") # This class can't be generic because it uses slots with attrs. # See: https://github.com/python-attrs/attrs/issues/313 -@attr.s(slots=True, auto_attribs=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class DictionaryEntry: # should be: Generic[DKT, DV]. """Returned when getting an entry from the cache + If `full` is true then `known_absent` will be the empty set. + Attributes: full: Whether the cache has the full or dict or just some keys. If not full then not all requested keys will necessarily be present @@ -53,20 +57,90 @@ class DictionaryEntry: # should be: Generic[DKT, DV]. return len(self.value) +class _FullCacheKey(enum.Enum): + """The key we use to cache the full dict.""" + + KEY = object() + + class _Sentinel(enum.Enum): # defining a sentinel in this way allows mypy to correctly handle the # type of a dictionary lookup. sentinel = object() +class _PerKeyValue(Generic[DV]): + """The cached value of a dictionary key. If `value` is the sentinel, + indicates that the requested key is known to *not* be in the full dict. + """ + + __slots__ = ["value"] + + def __init__(self, value: Union[DV, Literal[_Sentinel.sentinel]]) -> None: + self.value = value + + def __len__(self) -> int: + # We add a `__len__` implementation as we use this class in a cache + # where the values are variable length. + return 1 + + class DictionaryCache(Generic[KT, DKT, DV]): """Caches key -> dictionary lookups, supporting caching partial dicts, i.e. fetching a subset of dictionary keys for a particular key. + + This cache has two levels of key. First there is the "cache key" (of type + `KT`), which maps to a dict. The keys to that dict are the "dict key" (of + type `DKT`). The overall structure is therefore `KT->DKT->DV`. For + example, it might look like: + + { + 1: { 1: "a", 2: "b" }, + 2: { 1: "c" }, + } + + It is possible to look up either individual dict keys, or the *complete* + dict for a given cache key. + + Each dict item, and the complete dict is treated as a separate LRU + entry for the purpose of cache expiry. For example, given: + dict_cache.get(1, None) -> DictionaryEntry({1: "a", 2: "b"}) + dict_cache.get(1, [1]) -> DictionaryEntry({1: "a"}) + dict_cache.get(1, [2]) -> DictionaryEntry({2: "b"}) + + ... then the cache entry for the complete dict will expire first, + followed by the cache entry for the '1' dict key, and finally that + for the '2' dict key. 
""" def __init__(self, name: str, max_entries: int = 1000): - self.cache: LruCache[KT, DictionaryEntry] = LruCache( - max_size=max_entries, cache_name=name, size_callback=len + # We use a single LruCache to store two different types of entries: + # 1. Map from (key, dict_key) -> dict value (or sentinel, indicating + # the key doesn't exist in the dict); and + # 2. Map from (key, _FullCacheKey.KEY) -> full dict. + # + # The former is used when explicit keys of the dictionary are looked up, + # and the latter when the full dictionary is requested. + # + # If when explicit keys are requested and not in the cache, we then look + # to see if we have the full dict and use that if we do. If found in the + # full dict each key is added into the cache. + # + # This set up allows the `LruCache` to prune the full dict entries if + # they haven't been used in a while, even when there have been recent + # queries for subsets of the dict. + # + # Typing: + # * A key of `(KT, DKT)` has a value of `_PerKeyValue` + # * A key of `(KT, _FullCacheKey.KEY)` has a value of `Dict[DKT, DV]` + self.cache: LruCache[ + Tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]], + Union[_PerKeyValue, Dict[DKT, DV]], + ] = LruCache( + max_size=max_entries, + cache_name=name, + cache_type=TreeCache, + size_callback=len, ) self.name = name @@ -91,23 +165,83 @@ class DictionaryCache(Generic[KT, DKT, DV]): Args: key dict_keys: If given a set of keys then return only those keys - that exist in the cache. + that exist in the cache. If None then returns the full dict + if it is in the cache. Returns: - DictionaryEntry + DictionaryEntry: If `dict_keys` is not None then `DictionaryEntry` + will contain include the keys that are in the cache. If None then + will either return the full dict if in the cache, or the empty + dict (with `full` set to False) if it isn't. """ - entry = self.cache.get(key, _Sentinel.sentinel) - if entry is not _Sentinel.sentinel: - if dict_keys is None: - return DictionaryEntry( - entry.full, entry.known_absent, dict(entry.value) - ) + if dict_keys is None: + # The caller wants the full set of dictionary keys for this cache key + return self._get_full_dict(key) + + # We are being asked for a subset of keys. + + # First go and check for each requested dict key in the cache, tracking + # which we couldn't find. + values = {} + known_absent = set() + missing = [] + for dict_key in dict_keys: + entry = self.cache.get((key, dict_key), _Sentinel.sentinel) + if entry is _Sentinel.sentinel: + missing.append(dict_key) + continue + + assert isinstance(entry, _PerKeyValue) + + if entry.value is _Sentinel.sentinel: + known_absent.add(dict_key) else: - return DictionaryEntry( - entry.full, - entry.known_absent, - {k: entry.value[k] for k in dict_keys if k in entry.value}, - ) + values[dict_key] = entry.value + + # If we found everything we can return immediately. + if not missing: + return DictionaryEntry(False, known_absent, values) + + # We are missing some keys, so check if we happen to have the full dict in + # the cache. + # + # We don't update the last access time for this cache fetch, as we + # aren't explicitly interested in the full dict and so we don't want + # requests for explicit dict keys to keep the full dict in the cache. + entry = self.cache.get( + (key, _FullCacheKey.KEY), + _Sentinel.sentinel, + update_last_access=False, + ) + if entry is _Sentinel.sentinel: + # Not in the cache, return the subset of keys we found. + return DictionaryEntry(False, known_absent, values) + + # We have the full dict! 
+ assert isinstance(entry, dict) + + for dict_key in missing: + # We explicitly add each dict key to the cache, so that cache hit + # rates and LRU times for each key can be tracked separately. + value = entry.get(dict_key, _Sentinel.sentinel) # type: ignore[arg-type] + self.cache[(key, dict_key)] = _PerKeyValue(value) + + if value is not _Sentinel.sentinel: + values[dict_key] = value + + return DictionaryEntry(True, set(), values) + + def _get_full_dict( + self, + key: KT, + ) -> DictionaryEntry: + """Fetch the full dict for the given key.""" + + # First we check if we have cached the full dict. + entry = self.cache.get((key, _FullCacheKey.KEY), _Sentinel.sentinel) + if entry is not _Sentinel.sentinel: + assert isinstance(entry, dict) + return DictionaryEntry(True, set(), entry) return DictionaryEntry(False, set(), {}) @@ -117,7 +251,13 @@ class DictionaryCache(Generic[KT, DKT, DV]): # Increment the sequence number so that any SELECT statements that # raced with the INSERT don't update the cache (SYN-369) self.sequence += 1 - self.cache.pop(key, None) + + # We want to drop all information about the dict for the given key, so + # we use `del_multi` to delete it all in one go. + # + # We ignore the type error here: `del_multi` accepts a truncated key + # (when the key type is a tuple). + self.cache.del_multi((key,)) # type: ignore[arg-type] def invalidate_all(self) -> None: self.check_thread() @@ -131,7 +271,16 @@ class DictionaryCache(Generic[KT, DKT, DV]): value: Dict[DKT, DV], fetched_keys: Optional[Iterable[DKT]] = None, ) -> None: - """Updates the entry in the cache + """Updates the entry in the cache. + + Note: This does *not* invalidate any existing entries for the `key`. + In particular, if we add an entry for the cached "full dict" with + `fetched_keys=None`, existing entries for individual dict keys are + not invalidated. Likewise, adding entries for individual keys does + not invalidate any cached value for the full dict. + + In other words: if the underlying data is *changed*, the cache must + be explicitly invalidated via `.invalidate()`. Args: sequence @@ -149,20 +298,27 @@ class DictionaryCache(Generic[KT, DKT, DV]): # Only update the cache if the caches sequence number matches the # number that the cache had before the SELECT was started (SYN-369) if fetched_keys is None: - self._insert(key, value, set()) + self.cache[(key, _FullCacheKey.KEY)] = value else: - self._update_or_insert(key, value, fetched_keys) + self._update_subset(key, value, fetched_keys) - def _update_or_insert( - self, key: KT, value: Dict[DKT, DV], known_absent: Iterable[DKT] + def _update_subset( + self, key: KT, value: Dict[DKT, DV], fetched_keys: Iterable[DKT] ) -> None: - # We pop and reinsert as we need to tell the cache the size may have - # changed + """Add the given dictionary values as explicit keys in the cache. + + Args: + key: top-level cache key + value: The dictionary with all the values that we should cache + fetched_keys: The full set of dict keys that were looked up. Any keys + here not in `value` should be marked as "known absent". 
+ """ + + for dict_key, dict_value in value.items(): + self.cache[(key, dict_key)] = _PerKeyValue(dict_value) - entry: DictionaryEntry = self.cache.pop(key, DictionaryEntry(False, set(), {})) - entry.value.update(value) - entry.known_absent.update(known_absent) - self.cache[key] = entry + for dict_key in fetched_keys: + if dict_key in value: + continue - def _insert(self, key: KT, value: Dict[DKT, DV], known_absent: Set[DKT]) -> None: - self.cache[key] = DictionaryEntry(True, known_absent, value) + self.cache[(key, dict_key)] = _PerKeyValue(_Sentinel.sentinel) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 31f41fec82..b3bdedb04c 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -25,8 +25,10 @@ from typing import ( Collection, Dict, Generic, + Iterable, List, Optional, + Tuple, Type, TypeVar, Union, @@ -44,7 +46,11 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces from synapse.metrics.jemalloc import get_jemalloc_stats from synapse.util import Clock, caches from synapse.util.caches import CacheMetric, EvictionReason, register_cache -from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry +from synapse.util.caches.treecache import ( + TreeCache, + iterate_tree_cache_entry, + iterate_tree_cache_items, +) from synapse.util.linked_list import ListNode if TYPE_CHECKING: @@ -537,6 +543,7 @@ class LruCache(Generic[KT, VT]): default: Literal[None] = None, callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., + update_last_access: bool = ..., ) -> Optional[VT]: ... @@ -546,6 +553,7 @@ class LruCache(Generic[KT, VT]): default: T, callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., + update_last_access: bool = ..., ) -> Union[T, VT]: ... @@ -555,10 +563,27 @@ class LruCache(Generic[KT, VT]): default: Optional[T] = None, callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, + update_last_access: bool = True, ) -> Union[None, T, VT]: + """Look up a key in the cache + + Args: + key + default + callbacks: A collection of callbacks that will fire when the + node is removed from the cache (either due to invalidation + or expiry). + update_metrics: Whether to update the hit rate metrics + update_last_access: Whether to update the last access metrics + on a node if successfully fetched. These metrics are used + to determine when to remove the node from the cache. Set + to False if this fetch should *not* prevent a node from + being expired. + """ node = cache.get(key, None) if node is not None: - move_node_to_front(node) + if update_last_access: + move_node_to_front(node) node.add_callbacks(callbacks) if update_metrics and metrics: metrics.inc_hits() @@ -568,6 +593,65 @@ class LruCache(Generic[KT, VT]): metrics.inc_misses() return default + @overload + def cache_get_multi( + key: tuple, + default: Literal[None] = None, + update_metrics: bool = True, + ) -> Union[None, Iterable[Tuple[KT, VT]]]: + ... + + @overload + def cache_get_multi( + key: tuple, + default: T, + update_metrics: bool = True, + ) -> Union[T, Iterable[Tuple[KT, VT]]]: + ... + + @synchronized + def cache_get_multi( + key: tuple, + default: Optional[T] = None, + update_metrics: bool = True, + ) -> Union[None, T, Iterable[Tuple[KT, VT]]]: + """Returns a generator yielding all entries under the given key. + + Can only be used if backed by a tree cache. 
+ + Example: + + cache = LruCache(10, cache_type=TreeCache) + cache[(1, 1)] = "a" + cache[(1, 2)] = "b" + cache[(2, 1)] = "c" + + items = cache.get_multi((1,)) + assert list(items) == [((1, 1), "a"), ((1, 2), "b")] + + Returns: + Either default if the key doesn't exist, or a generator of the + key/value pairs. + """ + + assert isinstance(cache, TreeCache) + + node = cache.get(key, None) + if node is not None: + if update_metrics and metrics: + metrics.inc_hits() + + # We store entries in the `TreeCache` with values of type `_Node`, + # which we need to unwrap. + return ( + (full_key, lru_node.value) + for full_key, lru_node in iterate_tree_cache_items(key, node) + ) + else: + if update_metrics and metrics: + metrics.inc_misses() + return default + @synchronized def cache_set( key: KT, value: VT, callbacks: Collection[Callable[[], None]] = () @@ -674,6 +758,8 @@ class LruCache(Generic[KT, VT]): self.setdefault = cache_set_default self.pop = cache_pop self.del_multi = cache_del_multi + if cache_type is TreeCache: + self.get_multi = cache_get_multi # `invalidate` is exposed for consistency with DeferredCache, so that it can be # invalidated by the cache invalidation replication stream. self.invalidate = cache_del_multi diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index e78305f787..c1b8ec0c73 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -64,6 +64,15 @@ class TreeCache: self.size += 1 def get(self, key, default=None): + """When `key` is a full key, fetches the value for the given key (if + any). + + If `key` is only a partial key (i.e. a truncated tuple) then returns a + `TreeCacheNode`, which can be passed to the `iterate_tree_cache_*` + functions to iterate over all entries in the cache with keys that start + with the given partial key. + """ + node = self.root for k in key[:-1]: node = node.get(k, None) @@ -139,3 +148,32 @@ def iterate_tree_cache_entry(d): yield from iterate_tree_cache_entry(value_d) else: yield d + + +def iterate_tree_cache_items(key, value): + """Helper function to iterate over the leaves of a tree, i.e. a dict that + can contain dicts. + + The provided key is a tuple that will get prepended to the returned keys. + + Example: + + cache = TreeCache() + cache[(1, 1)] = "a" + cache[(1, 2)] = "b" + cache[(2, 1)] = "c" + + tree_node = cache.get((1,)) + + items = iterate_tree_cache_items((1,), tree_node) + assert list(items) == [((1, 1), "a"), ((1, 2), "b")] + + Returns: + A generator yielding key/value pairs. + """ + if isinstance(value, TreeCacheNode): + for sub_key, sub_value in value.items(): + yield from iterate_tree_cache_items((*key, sub_key), sub_value) + else: + # we've reached a leaf of the tree.
+ yield key, value diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 8043bdbde2..5564161750 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -369,8 +369,8 @@ class StateStoreTestCase(HomeserverTestCase): state_dict_ids = cache_entry.value self.assertEqual(cache_entry.full, False) - self.assertEqual(cache_entry.known_absent, {(e1.type, e1.state_key)}) - self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id}) + self.assertEqual(cache_entry.known_absent, set()) + self.assertDictEqual(state_dict_ids, {}) ############################################ # test that things work with a partial cache @@ -387,7 +387,7 @@ class StateStoreTestCase(HomeserverTestCase): ) self.assertEqual(is_all, False) - self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + self.assertDictEqual({}, state_dict) room_id = self.room.to_string() (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( @@ -412,7 +412,7 @@ class StateStoreTestCase(HomeserverTestCase): ) self.assertEqual(is_all, False) - self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + self.assertDictEqual({}, state_dict) (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, @@ -443,7 +443,7 @@ class StateStoreTestCase(HomeserverTestCase): ) self.assertEqual(is_all, False) - self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + self.assertDictEqual({}, state_dict) (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index bee66dee43..e8b6246ab5 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -20,7 +20,7 @@ from tests import unittest class DictCacheTestCase(unittest.TestCase): def setUp(self): - self.cache = DictionaryCache("foobar") + self.cache = DictionaryCache("foobar", max_entries=10) def test_simple_cache_hit_full(self): key = "test_simple_cache_hit_full" @@ -76,13 +76,13 @@ class DictCacheTestCase(unittest.TestCase): seq = self.cache.sequence test_value_1 = {"test": "test_simple_cache_hit_miss_partial"} - self.cache.update(seq, key, test_value_1, fetched_keys=set("test")) + self.cache.update(seq, key, test_value_1, fetched_keys={"test"}) seq = self.cache.sequence test_value_2 = {"test2": "test_simple_cache_hit_miss_partial2"} - self.cache.update(seq, key, test_value_2, fetched_keys=set("test2")) + self.cache.update(seq, key, test_value_2, fetched_keys={"test2"}) - c = self.cache.get(key) + c = self.cache.get(key, dict_keys=["test", "test2"]) self.assertEqual( { "test": "test_simple_cache_hit_miss_partial", @@ -90,3 +90,30 @@ class DictCacheTestCase(unittest.TestCase): }, c.value, ) + self.assertEqual(c.full, False) + + def test_invalidation(self): + """Test that the partial dict and full dicts get invalidated + separately. + """ + key = "some_key" + + seq = self.cache.sequence + # start by populating a "full dict" entry + self.cache.update(seq, key, {"a": "b", "c": "d"}) + + # add a bunch of individual entries, also keeping the individual + # entry for "a" warm. + for i in range(20): + self.cache.get(key, ["a"]) + self.cache.update(seq, f"key{i}", {1: 2}) + + # We should have evicted the full dict... + r = self.cache.get(key) + self.assertFalse(r.full) + self.assertTrue("c" not in r.value) + + # ... 
but kept the "a" entry that we kept querying. + r = self.cache.get(key, dict_keys=["a"]) + self.assertFalse(r.full) + self.assertEqual(r.value, {"a": "b"}) -- cgit 1.4.1 From 86e366a46e13710c013141094c407f185ac3fbe3 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Thu, 21 Jul 2022 19:56:45 +0200 Subject: Remove old empty/redundant slaved stores. (#13349) --- changelog.d/13349.misc | 1 + synapse/app/admin_cmd.py | 26 +- synapse/app/generic_worker.py | 34 +-- synapse/replication/slave/storage/account_data.py | 21 -- synapse/replication/slave/storage/appservice.py | 25 -- synapse/replication/slave/storage/deviceinbox.py | 19 -- synapse/replication/slave/storage/directory.py | 19 -- synapse/replication/slave/storage/profile.py | 19 -- synapse/replication/slave/storage/receipts.py | 20 -- synapse/replication/slave/storage/registration.py | 19 -- .../replication/slave/storage/test_account_data.py | 42 ---- tests/replication/slave/storage/test_receipts.py | 265 --------------------- tests/storage/test_receipts.py | 261 ++++++++++++++++++++ 13 files changed, 298 insertions(+), 473 deletions(-) create mode 100644 changelog.d/13349.misc delete mode 100644 synapse/replication/slave/storage/account_data.py delete mode 100644 synapse/replication/slave/storage/appservice.py delete mode 100644 synapse/replication/slave/storage/deviceinbox.py delete mode 100644 synapse/replication/slave/storage/directory.py delete mode 100644 synapse/replication/slave/storage/profile.py delete mode 100644 synapse/replication/slave/storage/receipts.py delete mode 100644 synapse/replication/slave/storage/registration.py delete mode 100644 tests/replication/slave/storage/test_account_data.py delete mode 100644 tests/replication/slave/storage/test_receipts.py create mode 100644 tests/storage/test_receipts.py diff --git a/changelog.d/13349.misc b/changelog.d/13349.misc new file mode 100644 index 0000000000..4df9a9f6d7 --- /dev/null +++ b/changelog.d/13349.misc @@ -0,0 +1 @@ +Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). 
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 53ec33bcd1..8a583d3ec6 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -28,18 +28,22 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.events import EventBase from synapse.handlers.admin import ExfiltrationWriter -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.server import HomeServer from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.appservice import ( + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, +) +from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.databases.main.receipts import ReceiptsWorkerStore +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore +from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.types import StateMap from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext @@ -48,15 +52,17 @@ logger = logging.getLogger("synapse.app.admin_cmd") class AdminCmdSlavedStore( - SlavedReceiptsStore, - SlavedAccountDataStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, SlavedFilteringStore, - SlavedDeviceInboxStore, SlavedDeviceStore, SlavedPushRuleStore, SlavedEventStore, + TagsWorkerStore, + DeviceInboxWorkerStore, + AccountDataWorkerStore, + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, + RegistrationWorkerStore, + ReceiptsWorkerStore, RoomWorkerStore, ): def __init__( diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 0c16584abc..42d1f6d219 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -48,19 +48,12 @@ from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.keys import SlavedKeyStore -from 
synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.pushers import SlavedPusherStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client import ( account_data, @@ -99,8 +92,15 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.synapse.client import build_synapse_client_resource_tree from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.appservice import ( + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, +) from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore +from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.databases.main.directory import DirectoryWorkerStore from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore from synapse.storage.databases.main.lock import LockStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore @@ -109,11 +109,15 @@ from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import PresenceStore +from synapse.storage.databases.main.profile import ProfileWorkerStore +from synapse.storage.databases.main.receipts import ReceiptsWorkerStore +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore from synapse.storage.databases.main.room_batch import RoomBatchStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore @@ -226,11 +230,11 @@ class GenericWorkerSlavedStore( UIAuthWorkerStore, EndToEndRoomKeyStore, PresenceStore, - SlavedDeviceInboxStore, + DeviceInboxWorkerStore, SlavedDeviceStore, - SlavedReceiptsStore, SlavedPushRuleStore, - SlavedAccountDataStore, + TagsWorkerStore, + AccountDataWorkerStore, SlavedPusherStore, CensorEventsStore, ClientIpWorkerStore, @@ -238,14 +242,16 @@ class GenericWorkerSlavedStore( SlavedKeyStore, RoomWorkerStore, RoomBatchStore, - DirectoryStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - SlavedProfileStore, + DirectoryWorkerStore, + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, + ProfileWorkerStore, SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, + ReceiptsWorkerStore, + RegistrationWorkerStore, SearchStore, TransactionWorkerStore, LockStore, diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py deleted file mode 100644 index 57d3237981..0000000000 --- 
a/synapse/replication/slave/storage/account_data.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.account_data import AccountDataWorkerStore -from synapse.storage.databases.main.tags import TagsWorkerStore - - -class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore): - pass diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py deleted file mode 100644 index 29f50c0add..0000000000 --- a/synapse/replication/slave/storage/appservice.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.appservice import ( - ApplicationServiceTransactionWorkerStore, - ApplicationServiceWorkerStore, -) - - -class SlavedApplicationServiceStore( - ApplicationServiceTransactionWorkerStore, ApplicationServiceWorkerStore -): - pass diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py deleted file mode 100644 index df9e4d8f45..0000000000 --- a/synapse/replication/slave/storage/deviceinbox.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore - - -class SlavedDeviceInboxStore(DeviceInboxWorkerStore): - pass diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py deleted file mode 100644 index ca716df3df..0000000000 --- a/synapse/replication/slave/storage/directory.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.directory import DirectoryWorkerStore - - -class DirectoryStore(DirectoryWorkerStore): - pass diff --git a/synapse/replication/slave/storage/profile.py b/synapse/replication/slave/storage/profile.py deleted file mode 100644 index a774a2ff48..0000000000 --- a/synapse/replication/slave/storage/profile.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.profile import ProfileWorkerStore - - -class SlavedProfileStore(ProfileWorkerStore): - pass diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py deleted file mode 100644 index 407862a2b2..0000000000 --- a/synapse/replication/slave/storage/receipts.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.receipts import ReceiptsWorkerStore - - -class SlavedReceiptsStore(ReceiptsWorkerStore): - pass diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py deleted file mode 100644 index 52c593e59d..0000000000 --- a/synapse/replication/slave/storage/registration.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.storage.databases.main.registration import RegistrationWorkerStore - - -class SlavedRegistrationStore(RegistrationWorkerStore): - pass diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py deleted file mode 100644 index 1524087c43..0000000000 --- a/tests/replication/slave/storage/test_account_data.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore - -from ._base import BaseSlavedStoreTestCase - -USER_ID = "@feeling:blue" -TYPE = "my.type" - - -class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase): - - STORE_TYPE = SlavedAccountDataStore - - def test_user_account_data(self): - self.get_success( - self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1}) - ) - self.replicate() - self.check( - "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 1} - ) - - self.get_success( - self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2}) - ) - self.replicate() - self.check( - "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 2} - ) diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py deleted file mode 100644 index 19f57115a1..0000000000 --- a/tests/replication/slave/storage/test_receipts.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.api.constants import ReceiptTypes -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.types import UserID, create_requester - -from tests.test_utils.event_injection import create_event - -from ._base import BaseSlavedStoreTestCase - -OTHER_USER_ID = "@other:test" -OUR_USER_ID = "@our:test" - - -class SlavedReceiptTestCase(BaseSlavedStoreTestCase): - - STORE_TYPE = SlavedReceiptsStore - - def prepare(self, reactor, clock, homeserver): - super().prepare(reactor, clock, homeserver) - self.room_creator = homeserver.get_room_creation_handler() - self.persist_event_storage_controller = ( - self.hs.get_storage_controllers().persistence - ) - - # Create a test user - self.ourUser = UserID.from_string(OUR_USER_ID) - self.ourRequester = create_requester(self.ourUser) - - # Create a second test user - self.otherUser = UserID.from_string(OTHER_USER_ID) - self.otherRequester = create_requester(self.otherUser) - - # Create a test room - info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) - self.room_id1 = info["room_id"] - - # Create a second test room - info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) - self.room_id2 = info["room_id"] - - # Join the second user to the first room - memberEvent, memberEventContext = self.get_success( - create_event( - self.hs, - room_id=self.room_id1, - type="m.room.member", - sender=self.otherRequester.user.to_string(), - state_key=self.otherRequester.user.to_string(), - content={"membership": "join"}, - ) - ) - self.get_success( - self.persist_event_storage_controller.persist_event( - memberEvent, memberEventContext - ) - ) - - # Join the second user to the second room - memberEvent, memberEventContext = self.get_success( - create_event( - self.hs, - room_id=self.room_id2, - type="m.room.member", - sender=self.otherRequester.user.to_string(), - state_key=self.otherRequester.user.to_string(), - content={"membership": "join"}, - ) - ) - self.get_success( - self.persist_event_storage_controller.persist_event( - memberEvent, memberEventContext - ) - ) - - def test_return_empty_with_no_data(self): - res = self.get_success( - self.master_store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] - ) - ) - self.assertEqual(res, {}) - - res = self.get_success( - self.master_store.get_receipts_for_user_with_orderings( - OUR_USER_ID, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], - ) - ) - self.assertEqual(res, {}) - - res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( - OUR_USER_ID, - self.room_id1, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], - ) - ) - self.assertEqual(res, None) - - def test_get_receipts_for_user(self): - # Send some events into the first room - event1_1_id = self.create_and_send_event( - self.room_id1, UserID.from_string(OTHER_USER_ID) - ) - event1_2_id = self.create_and_send_event( - self.room_id1, UserID.from_string(OTHER_USER_ID) - ) - - # Send public read receipt for the first event - self.get_success( - self.master_store.insert_receipt( - self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} - ) - ) - # Send private read receipt for the second event - self.get_success( - self.master_store.insert_receipt( - self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} - ) - ) - - # Test we get the latest event when we want both private and public receipts - res = self.get_success( - self.master_store.get_receipts_for_user( - OUR_USER_ID, 
[ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] - ) - ) - self.assertEqual(res, {self.room_id1: event1_2_id}) - - # Test we get the older event when we want only public receipt - res = self.get_success( - self.master_store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) - ) - self.assertEqual(res, {self.room_id1: event1_1_id}) - - # Test we get the latest event when we want only the public receipt - res = self.get_success( - self.master_store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ_PRIVATE] - ) - ) - self.assertEqual(res, {self.room_id1: event1_2_id}) - - # Test receipt updating - self.get_success( - self.master_store.insert_receipt( - self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} - ) - ) - res = self.get_success( - self.master_store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) - ) - self.assertEqual(res, {self.room_id1: event1_2_id}) - - # Send some events into the second room - event2_1_id = self.create_and_send_event( - self.room_id2, UserID.from_string(OTHER_USER_ID) - ) - - # Test new room is reflected in what the method returns - self.get_success( - self.master_store.insert_receipt( - self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} - ) - ) - res = self.get_success( - self.master_store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] - ) - ) - self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id}) - - def test_get_last_receipt_event_id_for_user(self): - # Send some events into the first room - event1_1_id = self.create_and_send_event( - self.room_id1, UserID.from_string(OTHER_USER_ID) - ) - event1_2_id = self.create_and_send_event( - self.room_id1, UserID.from_string(OTHER_USER_ID) - ) - - # Send public read receipt for the first event - self.get_success( - self.master_store.insert_receipt( - self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} - ) - ) - # Send private read receipt for the second event - self.get_success( - self.master_store.insert_receipt( - self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} - ) - ) - - # Test we get the latest event when we want both private and public receipts - res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( - OUR_USER_ID, - self.room_id1, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], - ) - ) - self.assertEqual(res, event1_2_id) - - # Test we get the older event when we want only public receipt - res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( - OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] - ) - ) - self.assertEqual(res, event1_1_id) - - # Test we get the latest event when we want only the private receipt - res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( - OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE] - ) - ) - self.assertEqual(res, event1_2_id) - - # Test receipt updating - self.get_success( - self.master_store.insert_receipt( - self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} - ) - ) - res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( - OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] - ) - ) - self.assertEqual(res, event1_2_id) - - # Send some events into the second room - event2_1_id = self.create_and_send_event( - self.room_id2, UserID.from_string(OTHER_USER_ID) - ) - - # Test new room is reflected in what the method returns - self.get_success( - self.master_store.insert_receipt( - self.room_id2, 
ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} - ) - ) - res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( - OUR_USER_ID, - self.room_id2, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], - ) - ) - self.assertEqual(res, event2_1_id) diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py new file mode 100644 index 0000000000..b1a8f8bba7 --- /dev/null +++ b/tests/storage/test_receipts.py @@ -0,0 +1,261 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.api.constants import ReceiptTypes +from synapse.types import UserID, create_requester + +from tests.test_utils.event_injection import create_event +from tests.unittest import HomeserverTestCase + +OTHER_USER_ID = "@other:test" +OUR_USER_ID = "@our:test" + + +class ReceiptTestCase(HomeserverTestCase): + def prepare(self, reactor, clock, homeserver): + super().prepare(reactor, clock, homeserver) + + self.store = homeserver.get_datastores().main + + self.room_creator = homeserver.get_room_creation_handler() + self.persist_event_storage_controller = ( + self.hs.get_storage_controllers().persistence + ) + + # Create a test user + self.ourUser = UserID.from_string(OUR_USER_ID) + self.ourRequester = create_requester(self.ourUser) + + # Create a second test user + self.otherUser = UserID.from_string(OTHER_USER_ID) + self.otherRequester = create_requester(self.otherUser) + + # Create a test room + info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) + self.room_id1 = info["room_id"] + + # Create a second test room + info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) + self.room_id2 = info["room_id"] + + # Join the second user to the first room + memberEvent, memberEventContext = self.get_success( + create_event( + self.hs, + room_id=self.room_id1, + type="m.room.member", + sender=self.otherRequester.user.to_string(), + state_key=self.otherRequester.user.to_string(), + content={"membership": "join"}, + ) + ) + self.get_success( + self.persist_event_storage_controller.persist_event( + memberEvent, memberEventContext + ) + ) + + # Join the second user to the second room + memberEvent, memberEventContext = self.get_success( + create_event( + self.hs, + room_id=self.room_id2, + type="m.room.member", + sender=self.otherRequester.user.to_string(), + state_key=self.otherRequester.user.to_string(), + content={"membership": "join"}, + ) + ) + self.get_success( + self.persist_event_storage_controller.persist_event( + memberEvent, memberEventContext + ) + ) + + def test_return_empty_with_no_data(self): + res = self.get_success( + self.store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {}) + + res = self.get_success( + self.store.get_receipts_for_user_with_orderings( + OUR_USER_ID, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, {}) + + res = self.get_success( + 
self.store.get_last_receipt_event_id_for_user( + OUR_USER_ID, + self.room_id1, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, None) + + def test_get_receipts_for_user(self): + # Send some events into the first room + event1_1_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + event1_2_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + + # Send public read receipt for the first event + self.get_success( + self.store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} + ) + ) + # Send private read receipt for the second event + self.get_success( + self.store.insert_receipt( + self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} + ) + ) + + # Test we get the latest event when we want both private and public receipts + res = self.get_success( + self.store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {self.room_id1: event1_2_id}) + + # Test we get the older event when we want only public receipt + res = self.get_success( + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) + ) + self.assertEqual(res, {self.room_id1: event1_1_id}) + + # Test we get the latest event when we want only the public receipt + res = self.get_success( + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ_PRIVATE]) + ) + self.assertEqual(res, {self.room_id1: event1_2_id}) + + # Test receipt updating + self.get_success( + self.store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} + ) + ) + res = self.get_success( + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) + ) + self.assertEqual(res, {self.room_id1: event1_2_id}) + + # Send some events into the second room + event2_1_id = self.create_and_send_event( + self.room_id2, UserID.from_string(OTHER_USER_ID) + ) + + # Test new room is reflected in what the method returns + self.get_success( + self.store.insert_receipt( + self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} + ) + ) + res = self.get_success( + self.store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id}) + + def test_get_last_receipt_event_id_for_user(self): + # Send some events into the first room + event1_1_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + event1_2_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + + # Send public read receipt for the first event + self.get_success( + self.store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} + ) + ) + # Send private read receipt for the second event + self.get_success( + self.store.insert_receipt( + self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} + ) + ) + + # Test we get the latest event when we want both private and public receipts + res = self.get_success( + self.store.get_last_receipt_event_id_for_user( + OUR_USER_ID, + self.room_id1, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, event1_2_id) + + # Test we get the older event when we want only public receipt + res = self.get_success( + self.store.get_last_receipt_event_id_for_user( + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] + ) + ) + self.assertEqual(res, event1_1_id) + 
+ # Test we get the latest event when we want only the private receipt + res = self.get_success( + self.store.get_last_receipt_event_id_for_user( + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, event1_2_id) + + # Test receipt updating + self.get_success( + self.store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} + ) + ) + res = self.get_success( + self.store.get_last_receipt_event_id_for_user( + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] + ) + ) + self.assertEqual(res, event1_2_id) + + # Send some events into the second room + event2_1_id = self.create_and_send_event( + self.room_id2, UserID.from_string(OTHER_USER_ID) + ) + + # Test new room is reflected in what the method returns + self.get_success( + self.store.insert_receipt( + self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} + ) + ) + res = self.get_success( + self.store.get_last_receipt_event_id_for_user( + OUR_USER_ID, + self.room_id2, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, event2_1_id) -- cgit 1.4.1 From 158782c3ce1eb92e98df50645b03afcab4f22db0 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 22 Jul 2022 10:13:01 +0100 Subject: Skip soft fail checks for rooms with partial state (#13354) When a room has the partial state flag, we may not have an accurate `m.room.member` event for event senders in the room's current state, and so cannot perform soft fail checks correctly. Skip the soft fail check entirely in this case. As an alternative, we could block until we have full state, but that would prevent us from receiving incoming events over federation, which is undesirable. Signed-off-by: Sean Quah --- changelog.d/13354.misc | 1 + synapse/handlers/federation_event.py | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 changelog.d/13354.misc diff --git a/changelog.d/13354.misc b/changelog.d/13354.misc new file mode 100644 index 0000000000..e08ee7866a --- /dev/null +++ b/changelog.d/13354.misc @@ -0,0 +1 @@ +Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index a5f4ce7c8a..9d9f1696f2 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1664,11 +1664,21 @@ class FederationEventHandler: """Checks if we should soft fail the event; if so, marks the event as such. + Does nothing for events in rooms with partial state, since we may not have an + accurate membership event for the sender in the current state. + Args: event state_ids: The state at the event if we don't have all the event's prev events origin: The host the event originates from. """ + if await self._store.is_partial_state_room(event.room_id): + # We might not know the sender's membership in the current state, so don't + # soft fail anything. Even if we do have a membership for the sender in the + # current state, it may have been derived from state resolution between + # partial and full state and may not be accurate. 
+ return + extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id) extrem_ids = set(extrem_ids_list) prev_event_ids = set(event.prev_event_ids()) -- cgit 1.4.1 From 0fa41a7b172cb157c6d6df41e7ae4bf5cbdc0d36 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 22 Jul 2022 10:26:09 +0100 Subject: Update locked frozendict version to 2.3.3 (#13352) frozendict 2.3.3 includes fixes for memory leaks that get triggered during `/sync`. --- changelog.d/13352.bugfix | 1 + docs/upgrade.md | 9 +++++++++ poetry.lock | 36 ++++++++++++++++++------------------ 3 files changed, 28 insertions(+), 18 deletions(-) create mode 100644 changelog.d/13352.bugfix diff --git a/changelog.d/13352.bugfix b/changelog.d/13352.bugfix new file mode 100644 index 0000000000..8128714299 --- /dev/null +++ b/changelog.d/13352.bugfix @@ -0,0 +1 @@ +Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. diff --git a/docs/upgrade.md b/docs/upgrade.md index 2c7c258909..fadb8e7ffb 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -114,6 +114,15 @@ vice versa. Once all workers are upgraded to v1.64 (or downgraded to v1.63), event replication will resume as normal. +## frozendict release + +[frozendict 2.3.3](https://github.com/Marco-Sulla/python-frozendict/releases/tag/v2.3.3) +has recently been released, which fixes a memory leak that occurs during `/sync` +requests. We advise server administrators who installed Synapse via pip to upgrade +frozendict with `pip install --upgrade frozendict`. The Docker image +`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` already +include the updated library. + # Upgrading to v1.62.0 ## New signatures for spam checker callbacks diff --git a/poetry.lock b/poetry.lock index 41ab40edd1..b62c24ae16 100644 --- a/poetry.lock +++ b/poetry.lock @@ -290,7 +290,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "frozendict" -version = "2.3.2" +version = "2.3.3" description = "A simple immutable dictionary" category = "main" optional = false @@ -1753,23 +1753,23 @@ flake8-comprehensions = [ {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"}, ] frozendict = [ - {file = "frozendict-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4fb171d1e84d17335365877e19d17440373b47ca74a73c06f65ac0b16d01e87f"}, - {file = "frozendict-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a3640e9d7533d164160b758351aa49d9e85bbe0bd76d219d4021e90ffa6a52"}, - {file = "frozendict-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:87cfd00fafbc147d8cd2590d1109b7db8ac8d7d5bdaa708ba46caee132b55d4d"}, - {file = "frozendict-2.3.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fb09761e093cfabb2f179dbfdb2521e1ec5701df714d1eb5c51fa7849027be19"}, - {file = "frozendict-2.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82176dc7adf01cf8f0193e909401939415a230a1853f4a672ec1629a06ceae18"}, - {file = "frozendict-2.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:c1c70826aa4a50fa283fe161834ac4a3ac7c753902c980bb8b595b0998a38ddb"}, - {file = "frozendict-2.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1db5035ddbed995badd1a62c4102b5e207b5aeb24472df2c60aba79639d7996b"}, - {file = "frozendict-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:4246fc4cb1413645ba4d3513939b90d979a5bae724be605a10b2b26ee12f839c"}, - {file = "frozendict-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:680cd42fb0a255da1ce45678ccbd7f69da750d5243809524ebe8f45b2eda6e6b"}, - {file = "frozendict-2.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a7f3a181d6722c92a9fab12d0c5c2b006a18ca5666098531f316d1e1c8984e3"}, - {file = "frozendict-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1cb866eabb3c1384a7fe88e1e1033e2b6623073589012ab637c552bf03f6364"}, - {file = "frozendict-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:952c5e5e664578c5c2ce8489ee0ab6a1855da02b58ef593ee728fc10d672641a"}, - {file = "frozendict-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:608b77904cd0117cd816df605a80d0043a5326ee62529327d2136c792165a823"}, - {file = "frozendict-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0eed41fd326f0bcc779837d8d9e1374da1bc9857fe3b9f2910195bbd5fff3aeb"}, - {file = "frozendict-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:bde28db6b5868dd3c45b3555f9d1dc5a1cca6d93591502fa5dcecce0dde6a335"}, - {file = "frozendict-2.3.2-py3-none-any.whl", hash = "sha256:6882a9bbe08ab9b5ff96ce11bdff3fe40b114b9813bc6801261e2a7b45e20012"}, - {file = "frozendict-2.3.2.tar.gz", hash = "sha256:7fac4542f0a13fbe704db4942f41ba3abffec5af8b100025973e59dff6a09d0d"}, + {file = "frozendict-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39942914c1217a5a49c7551495a103b3dbd216e19413687e003b859c6b0ebc12"}, + {file = "frozendict-2.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5589256058b31f2b91419fa30b8dc62dbdefe7710e688a3fd5b43849161eecc9"}, + {file = "frozendict-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:35eb7e59e287c41f4f712d4d3d2333354175b155d217b97c99c201d2d8920790"}, + {file = "frozendict-2.3.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:310aaf81793abf4f471895e6fe65e0e74a28a2aaf7b25c2ba6ccd4e35af06842"}, + {file = "frozendict-2.3.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c353c11010a986566a0cb37f9a783c560ffff7d67d5e7fd52221fb03757cdc43"}, + {file = "frozendict-2.3.3-cp36-cp36m-win_amd64.whl", hash = "sha256:15b5f82aad108125336593cec1b6420c638bf45f449c57e50949fc7654ea5a41"}, + {file = "frozendict-2.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4737e5257756bd6b877504ff50185b705db577b5330d53040a6cf6417bb3cdb"}, + {file = "frozendict-2.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a14c11e33e8b0bc09e07bba3732c77a502c39edb8c3959fd9a0e490e031158"}, + {file = "frozendict-2.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:027952d1698ac9c766ef43711226b178cdd49d2acbdff396936639ad1d2a5615"}, + {file = "frozendict-2.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ef818d66c85098a37cf42509545a4ba7dd0c4c679d6262123a8dc14cc474bab7"}, + {file = "frozendict-2.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812279f2b270c980112dc4e367b168054f937108f8044eced4199e0ab2945a37"}, + {file = "frozendict-2.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c1fb7efbfebc2075f781be3d9774e4ba6ce4fc399148b02097f68d4b3c4bc00a"}, + {file = "frozendict-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0b46d4bf95bce843c0151959d54c3e5b8d0ce29cb44794e820b3ec980d63eee"}, + {file = "frozendict-2.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38c4660f37fcc70a32ff997fe58e40b3fcc60b2017b286e33828efaa16b01308"}, + {file = 
"frozendict-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:919e3609844fece11ab18bcbf28a3ed20f8108ad4149d7927d413687f281c6c9"}, + {file = "frozendict-2.3.3-py3-none-any.whl", hash = "sha256:f988b482d08972a196664718167a993a61c9e9f6fe7b0ca2443570b5f20ca44a"}, + {file = "frozendict-2.3.3.tar.gz", hash = "sha256:398539c52af3c647d103185bbaa1291679f0507ad035fe3bab2a8b0366d52cf1"}, ] gitdb = [ {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, -- cgit 1.4.1 From c7c84b81e3ec3d66f3a57a8d6ba3e58dd4c81ecc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 22 Jul 2022 13:50:20 +0100 Subject: Update config_documentation.md (#13364) "changed in" goes before the example --- docs/usage/configuration/config_documentation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 11d1574484..a10f6662eb 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1530,6 +1530,8 @@ cannot *receive* more than a burst of 5 invites at a time. In contrast, the `rc_invites.per_issuer` limit applies to the *issuer* of the invite, meaning that a `rc_invite.per_issuer.burst_count` of 5 mandates that single user cannot *send* more than a burst of 5 invites at a time. +_Changed in version 1.63:_ added the `per_issuer` limit. + Example configuration: ```yaml rc_invites: @@ -1544,8 +1546,6 @@ rc_invites: burst_count: 5 ``` -_Changed in version 1.63:_ added the `per_issuer` limit. - --- ### `rc_third_party_invite` -- cgit 1.4.1 From 357561c1a2b2da6d1a7ad1e2340217fee18cc2b1 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 22 Jul 2022 16:00:11 -0500 Subject: Backfill remote event fetched by MSC3030 so we can paginate from it later (#13205) Depends on https://github.com/matrix-org/synapse/pull/13320 Complement tests: https://github.com/matrix-org/complement/pull/406 We could use the same method to backfill for `/context` as well in the future, see https://github.com/matrix-org/synapse/issues/3848 --- changelog.d/13205.feature | 1 + synapse/handlers/federation_event.py | 49 +++++++++++++++++++++++++++++- synapse/handlers/room.py | 59 +++++++++++++++++++++++++++--------- 3 files changed, 94 insertions(+), 15 deletions(-) create mode 100644 changelog.d/13205.feature diff --git a/changelog.d/13205.feature b/changelog.d/13205.feature new file mode 100644 index 0000000000..d89aa9aa75 --- /dev/null +++ b/changelog.d/13205.feature @@ -0,0 +1 @@ +Allow pagination from remote event after discovering it from MSC3030 `/timestamp_to_event`. 
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9d9f1696f2..16f20c8be7 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -793,7 +793,7 @@ class FederationEventHandler: if existing: if not existing.internal_metadata.is_outlier(): logger.info( - "Ignoring received event %s which we have already seen", + "_process_pulled_event: Ignoring received event %s which we have already seen", event_id, ) return @@ -1329,6 +1329,53 @@ class FederationEventHandler: marker_event, ) + async def backfill_event_id( + self, destination: str, room_id: str, event_id: str + ) -> EventBase: + """Backfill a single event and persist it as a non-outlier which means + we also pull in all of the state and auth events necessary for it. + + Args: + destination: The homeserver to pull the given event_id from. + room_id: The room where the event is from. + event_id: The event ID to backfill. + + Raises: + FederationError if we are unable to find the event from the destination + """ + logger.info( + "backfill_event_id: event_id=%s from destination=%s", event_id, destination + ) + + room_version = await self._store.get_room_version(room_id) + + event_from_response = await self._federation_client.get_pdu( + [destination], + event_id, + room_version, + ) + + if not event_from_response: + raise FederationError( + "ERROR", + 404, + "Unable to find event_id=%s from destination=%s to backfill." + % (event_id, destination), + affected=event_id, + ) + + # Persist the event we just fetched, including pulling all of the state + # and auth events to de-outlier it. This also sets up the necessary + # `state_groups` for the event. + await self._process_pulled_events( + destination, + [event_from_response], + # Prevent notifications going to clients + backfilled=True, + ) + + return event_from_response + async def _get_events_and_persist( self, destination: str, room_id: str, event_ids: Collection[str] ) -> None: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 978d3ee39f..55395457c3 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1384,6 +1384,7 @@ class TimestampLookupHandler: self.store = hs.get_datastores().main self.state_handler = hs.get_state_handler() self.federation_client = hs.get_federation_client() + self.federation_event_handler = hs.get_federation_event_handler() self._storage_controllers = hs.get_storage_controllers() async def get_event_for_timestamp( @@ -1479,38 +1480,68 @@ class TimestampLookupHandler: remote_response, ) - # TODO: Do we want to persist this as an extremity? - # TODO: I think ideally, we would try to backfill from - # this event and run this whole - # `get_event_for_timestamp` function again to make sure - # they didn't give us an event from their gappy history. remote_event_id = remote_response.event_id - origin_server_ts = remote_response.origin_server_ts + remote_origin_server_ts = remote_response.origin_server_ts + + # Backfill this event so we can get a pagination token for + # it with `/context` and paginate `/messages` from this + # point. + # + # TODO: The requested timestamp may lie in a part of the + # event graph that the remote server *also* didn't have, + # in which case they will have returned another event + # which may be nowhere near the requested timestamp. 
In + # the future, we may need to reconcile that gap and ask + # other homeservers, and/or extend `/timestamp_to_event` + # to return events on *both* sides of the timestamp to + # help reconcile the gap faster. + remote_event = ( + await self.federation_event_handler.backfill_event_id( + domain, room_id, remote_event_id + ) + ) + + # XXX: When we see that the remote server is not trustworthy, + # maybe we should not ask them first in the future. + if remote_origin_server_ts != remote_event.origin_server_ts: + logger.info( + "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.", + domain, + remote_event_id, + remote_origin_server_ts, + remote_event.origin_server_ts, + ) # Only return the remote event if it's closer than the local event if not local_event or ( - abs(origin_server_ts - timestamp) + abs(remote_event.origin_server_ts - timestamp) < abs(local_event.origin_server_ts - timestamp) ): - return remote_event_id, origin_server_ts + logger.info( + "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)", + remote_event_id, + remote_event.origin_server_ts, + timestamp, + local_event.event_id if local_event else None, + local_event.origin_server_ts if local_event else None, + ) + return remote_event_id, remote_origin_server_ts except (HttpResponseException, InvalidResponseError) as ex: # Let's not put a high priority on some other homeserver # failing to respond or giving a random response logger.debug( - "Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", + "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", domain, type(ex).__name__, ex, ex.args, ) - except Exception as ex: + except Exception: # But we do want to see some exceptions in our code logger.warning( - "Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", + "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception", domain, - type(ex).__name__, - ex, - ex.args, + exc_info=True, ) # To appease mypy, we have to add both of these conditions to check for -- cgit 1.4.1 From 43adf2521cc6952dcc7f0e3006dbfe52db85721a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jul 2022 10:21:06 +0100 Subject: Refactor presence so we can prune user in room caches (#13313) See #10826 and #10786 for context as to why we had to disable pruning on those caches. Now that `get_users_who_share_room_with_user` is called frequently only for presence, we just need to make calls to it less frequent and then we can remove the various levels of caching that is going on. --- changelog.d/13313.misc | 1 + synapse/handlers/presence.py | 112 +++++++++------------------ synapse/storage/_base.py | 4 + synapse/storage/databases/main/roommember.py | 83 ++++++++++++++++---- 4 files changed, 109 insertions(+), 91 deletions(-) create mode 100644 changelog.d/13313.misc diff --git a/changelog.d/13313.misc b/changelog.d/13313.misc new file mode 100644 index 0000000000..0f3c1f0afd --- /dev/null +++ b/changelog.d/13313.misc @@ -0,0 +1 @@ +Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. 
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 895ea63ed3..741504ba9f 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -34,7 +34,6 @@ from typing import ( Callable, Collection, Dict, - FrozenSet, Generator, Iterable, List, @@ -42,7 +41,6 @@ from typing import ( Set, Tuple, Type, - Union, ) from prometheus_client import Counter @@ -68,7 +66,6 @@ from synapse.storage.databases.main import DataStore from synapse.streams import EventSource from synapse.types import JsonDict, StreamKeyType, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer -from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -1656,15 +1653,18 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): # doesn't return. C.f. #5503. return [], max_token - # Figure out which other users this user should receive updates for - users_interested_in = await self._get_interested_in(user, explicit_room_id) + # Figure out which other users this user should explicitly receive + # updates for + additional_users_interested_in = ( + await self.get_presence_router().get_interested_users(user.to_string()) + ) # We have a set of users that we're interested in the presence of. We want to # cross-reference that with the users that have actually changed their presence. # Check whether this user should see all user updates - if users_interested_in == PresenceRouter.ALL_USERS: + if additional_users_interested_in == PresenceRouter.ALL_USERS: # Provide presence state for all users presence_updates = await self._filter_all_presence_updates_for_user( user_id, include_offline, from_key @@ -1673,34 +1673,47 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): return presence_updates, max_token # Make mypy happy. users_interested_in should now be a set - assert not isinstance(users_interested_in, str) + assert not isinstance(additional_users_interested_in, str) + + # We always care about our own presence. + additional_users_interested_in.add(user_id) + + if explicit_room_id: + user_ids = await self.store.get_users_in_room(explicit_room_id) + additional_users_interested_in.update(user_ids) # The set of users that we're interested in and that have had a presence update. # We'll actually pull the presence updates for these users at the end. - interested_and_updated_users: Union[Set[str], FrozenSet[str]] = set() + interested_and_updated_users: Collection[str] if from_key is not None: # First get all users that have had a presence update updated_users = stream_change_cache.get_all_entities_changed(from_key) # Cross-reference users we're interested in with those that have had updates. - # Use a slightly-optimised method for processing smaller sets of updates. - if updated_users is not None and len(updated_users) < 500: - # For small deltas, it's quicker to get all changes and then - # cross-reference with the users we're interested in + if updated_users is not None: + # If we have the full list of changes for presence we can + # simply check which ones share a room with the user. get_updates_counter.labels("stream").inc() - for other_user_id in updated_users: - if other_user_id in users_interested_in: - # mypy thinks this variable could be a FrozenSet as it's possibly set - # to one in the `get_entities_changed` call below, and `add()` is not - # method on a FrozenSet. 
That doesn't affect us here though, as - # `interested_and_updated_users` is clearly a set() above. - interested_and_updated_users.add(other_user_id) # type: ignore + + sharing_users = await self.store.do_users_share_a_room( + user_id, updated_users + ) + + interested_and_updated_users = ( + sharing_users.union(additional_users_interested_in) + ).intersection(updated_users) + else: # Too many possible updates. Find all users we can see and check # if any of them have changed. get_updates_counter.labels("full").inc() + users_interested_in = ( + await self.store.get_users_who_share_room_with_user(user_id) + ) + users_interested_in.update(additional_users_interested_in) + interested_and_updated_users = ( stream_change_cache.get_entities_changed( users_interested_in, from_key @@ -1709,7 +1722,10 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): else: # No from_key has been specified. Return the presence for all users # this user is interested in - interested_and_updated_users = users_interested_in + interested_and_updated_users = ( + await self.store.get_users_who_share_room_with_user(user_id) + ) + interested_and_updated_users.update(additional_users_interested_in) # Retrieve the current presence state for each user users_to_state = await self.get_presence_handler().current_state_for_users( @@ -1804,62 +1820,6 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): def get_current_key(self) -> int: return self.store.get_current_presence_token() - @cached(num_args=2, cache_context=True) - async def _get_interested_in( - self, - user: UserID, - explicit_room_id: Optional[str] = None, - cache_context: Optional[_CacheContext] = None, - ) -> Union[Set[str], str]: - """Returns the set of users that the given user should see presence - updates for. - - Args: - user: The user to retrieve presence updates for. - explicit_room_id: The users that are in the room will be returned. - - Returns: - A set of user IDs to return presence updates for, or "ALL" to return all - known updates. - """ - user_id = user.to_string() - users_interested_in = set() - users_interested_in.add(user_id) # So that we receive our own presence - - # cache_context isn't likely to ever be None due to the @cached decorator, - # but we can't have a non-optional argument after the optional argument - # explicit_room_id either. Assert cache_context is not None so we can use it - # without mypy complaining. 
- assert cache_context - - # Check with the presence router whether we should poll additional users for - # their presence information - additional_users = await self.get_presence_router().get_interested_users( - user.to_string() - ) - if additional_users == PresenceRouter.ALL_USERS: - # If the module requested that this user see the presence updates of *all* - # users, then simply return that instead of calculating what rooms this - # user shares - return PresenceRouter.ALL_USERS - - # Add the additional users from the router - users_interested_in.update(additional_users) - - # Find the users who share a room with this user - users_who_share_room = await self.store.get_users_who_share_room_with_user( - user_id, on_invalidate=cache_context.invalidate - ) - users_interested_in.update(users_who_share_room) - - if explicit_room_id: - user_ids = await self.store.get_users_in_room( - explicit_room_id, on_invalidate=cache_context.invalidate - ) - users_interested_in.update(user_ids) - - return users_interested_in - def handle_timeouts( user_states: List[UserPresenceState], diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index a2f8310388..e30f9c76d4 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -80,6 +80,10 @@ class SQLBaseStore(metaclass=ABCMeta): ) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) + # There's no easy way of invalidating this cache for just the users + # that have changed, so we just clear the entire thing. + self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) + for user_id in members_changed: self._attempt_to_invalidate_cache( "get_user_in_room_with_profile", (room_id, user_id) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index df6b82660e..e2cccc688c 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -21,6 +21,7 @@ from typing import ( FrozenSet, Iterable, List, + Mapping, Optional, Set, Tuple, @@ -55,6 +56,7 @@ from synapse.types import JsonDict, PersistedEventPosition, StateMap, get_domain from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList +from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -183,7 +185,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): self._check_safe_current_state_events_membership_updated_txn, ) - @cached(max_entries=100000, iterable=True, prune_unread_entries=False) + @cached(max_entries=100000, iterable=True) async def get_users_in_room(self, room_id: str) -> List[str]: return await self.db_pool.runInteraction( "get_users_in_room", self.get_users_in_room_txn, room_id @@ -561,7 +563,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): return results_dict.get("membership"), results_dict.get("event_id") - @cached(max_entries=500000, iterable=True, prune_unread_entries=False) + @cached(max_entries=500000, iterable=True) async def get_rooms_for_user_with_stream_ordering( self, user_id: str ) -> FrozenSet[GetRoomsForUserWithStreamOrdering]: @@ -732,25 +734,76 @@ class RoomMemberWorkerStore(EventsWorkerStore): ) return frozenset(r.room_id for r in rooms) - @cached( - max_entries=500000, - cache_context=True, - iterable=True, - prune_unread_entries=False, + @cached(max_entries=10000) + async def does_pair_of_users_share_a_room( + self, user_id: str, other_user_id: str + ) -> 
bool: + raise NotImplementedError() + + @cachedList( + cached_method_name="does_pair_of_users_share_a_room", list_name="other_user_ids" ) - async def get_users_who_share_room_with_user( - self, user_id: str, cache_context: _CacheContext + async def _do_users_share_a_room( + self, user_id: str, other_user_ids: Collection[str] + ) -> Mapping[str, Optional[bool]]: + """Return mapping from user ID to whether they share a room with the + given user. + + Note: `None` and `False` are equivalent and mean they don't share a + room. + """ + + def do_users_share_a_room_txn( + txn: LoggingTransaction, user_ids: Collection[str] + ) -> Dict[str, bool]: + clause, args = make_in_list_sql_clause( + self.database_engine, "state_key", user_ids + ) + + # This query works by fetching both the list of rooms for the target + # user and the set of other users, and then checking if there is any + # overlap. + sql = f""" + SELECT b.state_key + FROM ( + SELECT room_id FROM current_state_events + WHERE type = 'm.room.member' AND membership = 'join' AND state_key = ? + ) AS a + INNER JOIN ( + SELECT room_id, state_key FROM current_state_events + WHERE type = 'm.room.member' AND membership = 'join' AND {clause} + ) AS b using (room_id) + """ + + txn.execute(sql, (user_id, *args)) + return {u: True for u, in txn} + + to_return = {} + for batch_user_ids in batch_iter(other_user_ids, 1000): + res = await self.db_pool.runInteraction( + "do_users_share_a_room", do_users_share_a_room_txn, batch_user_ids + ) + to_return.update(res) + + return to_return + + async def do_users_share_a_room( + self, user_id: str, other_user_ids: Collection[str] ) -> Set[str]: + """Return the set of users who share a room with the first user""" + + user_dict = await self._do_users_share_a_room(user_id, other_user_ids) + + return {u for u, share_room in user_dict.items() if share_room} + + async def get_users_who_share_room_with_user(self, user_id: str) -> Set[str]: """Returns the set of users who share a room with `user_id`""" - room_ids = await self.get_rooms_for_user( - user_id, on_invalidate=cache_context.invalidate - ) + room_ids = await self.get_rooms_for_user(user_id) user_who_share_room = set() for room_id in room_ids: - user_ids = await self.get_users_in_room( - room_id, on_invalidate=cache_context.invalidate - ) + user_ids = await self.get_users_in_room(room_id) user_who_share_room.update(user_ids) return user_who_share_room -- cgit 1.4.1 From 908aeac44ab3acc1c42fd6c33ef716ddf51142a7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 25 Jul 2022 08:34:06 -0400 Subject: Additional fixes for opentracing type hints. (#13362) --- changelog.d/13362.misc | 1 + synapse/logging/opentracing.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13362.misc diff --git a/changelog.d/13362.misc b/changelog.d/13362.misc new file mode 100644 index 0000000000..c80578ce95 --- /dev/null +++ b/changelog.d/13362.misc @@ -0,0 +1 @@ +Add missing type hints to open tracing module.
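For context before the diff that follows: `tag_args` inspects a function's argument spec and records each argument as a tracing tag, and the fix below stringifies the values first so the tracing backend never receives arbitrary objects. A minimal, self-contained sketch of that pattern (not Synapse's actual `synapse/logging/opentracing.py`; `record_tag` is a hypothetical stand-in for `set_tag`):

```python
# Minimal sketch of a tag_args-style decorator, using only the standard
# library. record_tag is a hypothetical stand-in for opentracing's set_tag.
import inspect
from functools import wraps
from typing import Any, Callable, TypeVar

F = TypeVar("F", bound=Callable[..., Any])


def record_tag(key: str, value: str) -> None:
    # A real implementation would attach the tag to the active tracing span.
    print(f"tag {key}={value!r}")


def tag_args(func: F) -> F:
    argspec = inspect.getfullargspec(func)

    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Stringify every value up front so the tracing backend never has to
        # serialise arbitrary objects; the str() calls in the diff below make
        # the same change.
        for name, value in zip(argspec.args, args):
            record_tag("ARG_" + name, str(value))
        record_tag("args", str(args[len(argspec.args):]))
        record_tag("kwargs", str(kwargs))
        return func(*args, **kwargs)

    return wrapper  # type: ignore[return-value]


@tag_args
def fetch(event_id: str, limit: int = 10) -> None:
    pass


fetch("$abc123", limit=5)  # records ARG_event_id, args, kwargs
```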
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index ad5cbf46a4..c1aa205eed 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -910,8 +910,8 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R: argspec = inspect.getfullargspec(func) for i, arg in enumerate(argspec.args[1:]): - set_tag("ARG_" + arg, args[i]) # type: ignore[index] - set_tag("args", args[len(argspec.args) :]) # type: ignore[index] + set_tag("ARG_" + arg, str(args[i])) # type: ignore[index] + set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] set_tag("kwargs", str(kwargs)) return func(*args, **kwargs) -- cgit 1.4.1 From e8519e0ed289b67fa07c1bdbb6898852dc1a50b9 Mon Sep 17 00:00:00 2001 From: Jan Schär Date: Mon, 25 Jul 2022 17:27:19 +0200 Subject: Support Implicit TLS for sending emails (#13317) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, TLS could only be used with STARTTLS. Add a new option `force_tls`, where TLS is used from the start. Implicit TLS is recommended over STARTTLS, see https://datatracker.ietf.org/doc/html/rfc8314 Fixes #8046. Signed-off-by: Jan Schär --- changelog.d/13317.feature | 1 + docs/usage/configuration/config_documentation.md | 11 ++++- synapse/config/emailconfig.py | 7 ++- synapse/handlers/send_email.py | 36 ++++++++++----- tests/handlers/test_send_email.py | 57 +++++++++++++++++++++++- 5 files changed, 99 insertions(+), 13 deletions(-) create mode 100644 changelog.d/13317.feature diff --git a/changelog.d/13317.feature b/changelog.d/13317.feature new file mode 100644 index 0000000000..e0ebd2b51f --- /dev/null +++ b/changelog.d/13317.feature @@ -0,0 +1 @@ +Support Implicit TLS for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index a10f6662eb..eefcc7829d 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3187,9 +3187,17 @@ Server admins can configure custom templates for email content. See This setting has the following sub-options: * `smtp_host`: The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. -* `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 25. +* `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 465 if `force_tls` is true, else 25. + + _Changed in Synapse 1.64.0:_ the default port is now aware of `force_tls`. * `smtp_user` and `smtp_pass`: Username/password for authentication to the SMTP server. By default, no authentication is attempted. +* `force_tls`: By default, Synapse connects over plain text and then optionally upgrades + to TLS via STARTTLS. If this option is set to true, TLS is used from the start (Implicit TLS), + and the option `require_transport_security` is ignored. + It is recommended to enable this if supported by your mail server. + + _New in Synapse 1.64.0._ * `require_transport_security`: Set to true to require TLS transport security for SMTP. By default, Synapse will connect over plain text, and will then switch to TLS via STARTTLS *if the SMTP server supports it*.
If this option is set, @@ -3254,6 +3262,7 @@ email: smtp_port: 587 smtp_user: "exampleusername" smtp_pass: "examplepassword" + force_tls: true require_transport_security: true enable_tls: false notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>" diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 3ead80d985..73b469f414 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -85,14 +85,19 @@ class EmailConfig(Config): if email_config is None: email_config = {} + self.force_tls = email_config.get("force_tls", False) self.email_smtp_host = email_config.get("smtp_host", "localhost") - self.email_smtp_port = email_config.get("smtp_port", 25) + self.email_smtp_port = email_config.get( + "smtp_port", 465 if self.force_tls else 25 + ) self.email_smtp_user = email_config.get("smtp_user", None) self.email_smtp_pass = email_config.get("smtp_pass", None) self.require_transport_security = email_config.get( "require_transport_security", False ) self.enable_smtp_tls = email_config.get("enable_tls", True) + if self.force_tls and not self.enable_smtp_tls: + raise ConfigError("email.force_tls requires email.enable_tls to be true") if self.require_transport_security and not self.enable_smtp_tls: raise ConfigError( "email.require_transport_security requires email.enable_tls to be true" diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index a305a66860..e2844799e8 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -23,10 +23,12 @@ from pkg_resources import parse_version import twisted from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorTCP +from twisted.internet.interfaces import IOpenSSLContextFactory +from twisted.internet.ssl import optionsForClientTLS from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory from synapse.logging.context import make_deferred_yieldable +from synapse.types import ISynapseReactor if TYPE_CHECKING: from synapse.server import HomeServer @@ -48,7 +50,7 @@ class _NoTLSESMTPSender(ESMTPSender): async def _sendmail( - reactor: IReactorTCP, + reactor: ISynapseReactor, smtphost: str, smtpport: int, from_addr: str, @@ -59,6 +61,7 @@ async def _sendmail( require_auth: bool = False, require_tls: bool = False, enable_tls: bool = True, + force_tls: bool = False, ) -> None: """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests @@ -73,8 +76,9 @@ password: password to give when authenticating require_auth: if auth is not offered, fail the request require_tls: if TLS is not offered, fail the request - enable_tls: True to enable TLS. If this is False and require_tls is True, + enable_tls: True to enable STARTTLS. If this is False and require_tls is True, the request will fail. + force_tls: True to enable Implicit TLS. """ msg = BytesIO(msg_bytes) d: "Deferred[object]" = Deferred() @@ -105,13 +109,23 @@ # set to enable TLS.
factory = build_sender_factory(hostname=smtphost if enable_tls else None) - reactor.connectTCP( - smtphost, - smtpport, - factory, - timeout=30, - bindAddress=None, - ) + if force_tls: + reactor.connectSSL( + smtphost, + smtpport, + factory, + optionsForClientTLS(smtphost), + timeout=30, + bindAddress=None, + ) + else: + reactor.connectTCP( + smtphost, + smtpport, + factory, + timeout=30, + bindAddress=None, + ) await make_deferred_yieldable(d) @@ -132,6 +146,7 @@ class SendEmailHandler: self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None self._require_transport_security = hs.config.email.require_transport_security self._enable_tls = hs.config.email.enable_smtp_tls + self._force_tls = hs.config.email.force_tls self._sendmail = _sendmail @@ -189,4 +204,5 @@ class SendEmailHandler: require_auth=self._smtp_user is not None, require_tls=self._require_transport_security, enable_tls=self._enable_tls, + force_tls=self._force_tls, ) diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py index 6f77b1237c..da4bf8b582 100644 --- a/tests/handlers/test_send_email.py +++ b/tests/handlers/test_send_email.py @@ -23,7 +23,7 @@ from twisted.internet.defer import ensureDeferred from twisted.mail import interfaces, smtp from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config @implementer(interfaces.IMessageDelivery) @@ -110,3 +110,58 @@ class SendEmailHandlerTestCase(HomeserverTestCase): user, msg = message_delivery.messages.pop() self.assertEqual(str(user), "foo@bar.com") self.assertIn(b"Subject: test subject", msg) + + @override_config( + { + "email": { + "notif_from": "noreply@test", + "force_tls": True, + }, + } + ) + def test_send_email_force_tls(self): + """Happy-path test that we can send email to an Implicit TLS server.""" + h = self.hs.get_send_email_handler() + d = ensureDeferred( + h.send_email( + "foo@bar.com", "test subject", "Tests", "HTML content", "Text content" + ) + ) + # there should be an attempt to connect to localhost:465 + self.assertEqual(len(self.reactor.sslClients), 1) + ( + host, + port, + client_factory, + contextFactory, + _timeout, + _bindAddress, + ) = self.reactor.sslClients[0] + self.assertEqual(host, "localhost") + self.assertEqual(port, 465) + + # wire it up to an SMTP server + message_delivery = _DummyMessageDelivery() + server_protocol = smtp.ESMTP() + server_protocol.delivery = message_delivery + # make sure that the server uses the test reactor to set timeouts + server_protocol.callLater = self.reactor.callLater # type: ignore[assignment] + + client_protocol = client_factory.buildProtocol(None) + client_protocol.makeConnection(FakeTransport(server_protocol, self.reactor)) + server_protocol.makeConnection( + FakeTransport( + client_protocol, + self.reactor, + peer_address=IPv4Address("TCP", "127.0.0.1", 1234), + ) + ) + + # the message should now get delivered + self.get_success(d, by=0.1) + + # check it arrived + self.assertEqual(len(message_delivery.messages), 1) + user, msg = message_delivery.messages.pop() + self.assertEqual(str(user), "foo@bar.com") + self.assertIn(b"Subject: test subject", msg) -- cgit 1.4.1 From 935e73efed922993a360b4e1e10a2fa4a2f77c84 Mon Sep 17 00:00:00 2001 From: Matt Holt Date: Mon, 25 Jul 2022 10:07:26 -0600 Subject: Update Caddy reverse proxy documentation (#13344) Improve/simplify Caddy examples. 
Remove Caddy v1 (has long been EOL'ed) Signed-off-by: Matthew Holt --- changelog.d/13344.doc | 1 + docs/reverse_proxy.md | 53 +++++++++++---------------------------------------- 2 files changed, 12 insertions(+), 42 deletions(-) create mode 100644 changelog.d/13344.doc diff --git a/changelog.d/13344.doc b/changelog.d/13344.doc new file mode 100644 index 0000000000..fca187df92 --- /dev/null +++ b/changelog.d/13344.doc @@ -0,0 +1 @@ +Improve Caddy reverse proxy documentation. diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index 69caa8a73e..d1618e8155 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -79,63 +79,32 @@ server { } ``` -### Caddy v1 - -``` -matrix.example.com { - proxy /_matrix http://localhost:8008 { - transparent - } - - proxy /_synapse/client http://localhost:8008 { - transparent - } -} - -example.com:8448 { - proxy / http://localhost:8008 { - transparent - } -} -``` - ### Caddy v2 ``` matrix.example.com { - reverse_proxy /_matrix/* http://localhost:8008 - reverse_proxy /_synapse/client/* http://localhost:8008 + reverse_proxy /_matrix/* localhost:8008 + reverse_proxy /_synapse/client/* localhost:8008 } example.com:8448 { - reverse_proxy http://localhost:8008 + reverse_proxy localhost:8008 } ``` + [Delegation](delegate.md) example: -``` -(matrix-well-known-header) { - # Headers - header Access-Control-Allow-Origin "*" - header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" - header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization" - header Content-Type "application/json" -} +``` example.com { - handle /.well-known/matrix/server { - import matrix-well-known-header - respond `{"m.server":"matrix.example.com:443"}` - } - - handle /.well-known/matrix/client { - import matrix-well-known-header - respond `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}` - } + header /.well-known/matrix/* Content-Type application/json + header /.well-known/matrix/* Access-Control-Allow-Origin * + respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}` + respond /.well-known/matrix/client `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}` } matrix.example.com { - reverse_proxy /_matrix/* http://localhost:8008 - reverse_proxy /_synapse/client/* http://localhost:8008 + reverse_proxy /_matrix/* localhost:8008 + reverse_proxy /_synapse/client/* localhost:8008 } ``` -- cgit 1.4.1 From 549c55606acb3414409eb3b940a3457aee8a7a95 Mon Sep 17 00:00:00 2001 From: Doug <6060466+pixlwave@users.noreply.github.com> Date: Tue, 26 Jul 2022 09:08:20 +0100 Subject: Disable autocorrect and autocapitalisation when entering username for SSO registration. (#13350) When registering a new account via SSO on iOS, the text field becomes pretty annoying as it autocapitalises and autocorrects your input. This PR fixes that (although I have only tested the raw HTML file on the simulator, I'm not sure how to get the complete setup available for testing in the flow).
--- changelog.d/13350.bugfix | 1 + synapse/res/templates/sso_auth_account_details.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13350.bugfix diff --git a/changelog.d/13350.bugfix b/changelog.d/13350.bugfix new file mode 100644 index 0000000000..46c496a6a0 --- /dev/null +++ b/changelog.d/13350.bugfix @@ -0,0 +1 @@ +Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 1ba850369a..cf72df0a2a 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -138,7 +138,7 @@
<div class="prefix">@</div>
-              <input type="text" name="username" id="field-username" value="{{ username }}" autofocus> +              <input type="text" name="username" id="field-username" value="{{ username }}" autofocus autocorrect="off" autocapitalize="none">
<div class="postfix">:{{ server_name }}</div>
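The fragment above comes from a Jinja2 template (Synapse renders everything under `res/templates` with Jinja2). A minimal sketch of how such a fragment renders, using an illustrative approximation of the markup rather than the full `sso_auth_account_details.html`:

```python
# Minimal sketch: render a username field like the fragment above.
# Assumes Jinja2 is installed; the markup is an illustrative approximation.
from jinja2 import Template

FRAGMENT = Template(
    '<div class="prefix">@</div>\n'
    '<input type="text" name="username" id="field-username" '
    'value="{{ username }}" autofocus autocorrect="off" autocapitalize="none">\n'
    '<div class="postfix">:{{ server_name }}</div>'
)

# autocorrect="off" / autocapitalize="none" are what stop mobile browsers
# from mangling the localpart as the user types it.
print(FRAGMENT.render(username="alice", server_name="example.com"))
```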
-- cgit 1.4.1 From 641412decde53bad393d478858355ae1db5306cf Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 26 Jul 2022 12:12:22 +0100 Subject: 1.64.0rc1 --- CHANGES.md | 90 +++++++++++++++++++++++++++++++++++++++++++++-- changelog.d/12942.misc | 1 - changelog.d/12943.misc | 1 - changelog.d/12967.removal | 1 - changelog.d/13038.feature | 1 - changelog.d/13094.misc | 1 - changelog.d/13172.misc | 1 - changelog.d/13175.misc | 1 - changelog.d/13192.removal | 1 - changelog.d/13198.misc | 1 - changelog.d/13205.feature | 1 - changelog.d/13208.feature | 1 - changelog.d/13215.misc | 1 - changelog.d/13218.misc | 1 - changelog.d/13220.feature | 1 - changelog.d/13224.misc | 1 - changelog.d/13231.doc | 1 - changelog.d/13233.doc | 1 - changelog.d/13239.removal | 1 - changelog.d/13240.misc | 1 - changelog.d/13242.misc | 1 - changelog.d/13251.misc | 1 - changelog.d/13253.misc | 1 - changelog.d/13254.misc | 1 - changelog.d/13255.misc | 1 - changelog.d/13257.misc | 1 - changelog.d/13258.misc | 1 - changelog.d/13260.misc | 1 - changelog.d/13261.doc | 1 - changelog.d/13263.bugfix | 1 - changelog.d/13266.misc | 1 - changelog.d/13267.misc | 1 - changelog.d/13270.bugfix | 1 - changelog.d/13271.doc | 1 - changelog.d/13274.misc | 1 - changelog.d/13276.feature | 1 - changelog.d/13278.bugfix | 1 - changelog.d/13279.misc | 1 - changelog.d/13281.misc | 1 - changelog.d/13284.misc | 1 - changelog.d/13285.misc | 1 - changelog.d/13292.misc | 1 - changelog.d/13296.bugfix | 1 - changelog.d/13297.misc | 1 - changelog.d/13299.misc | 1 - changelog.d/13300.misc | 1 - changelog.d/13303.misc | 1 - changelog.d/13307.misc | 1 - changelog.d/13308.misc | 1 - changelog.d/13310.misc | 1 - changelog.d/13311.misc | 1 - changelog.d/13313.misc | 1 - changelog.d/13314.doc | 1 - changelog.d/13317.feature | 1 - changelog.d/13318.misc | 1 - changelog.d/13320.misc | 1 - changelog.d/13323.misc | 1 - changelog.d/13324.misc | 1 - changelog.d/13326.removal | 1 - changelog.d/13328.misc | 1 - changelog.d/13329.misc | 1 - changelog.d/13333.doc | 1 - changelog.d/13338.doc | 1 - changelog.d/13342.misc | 1 - changelog.d/13344.doc | 1 - changelog.d/13345.misc | 1 - changelog.d/13349.misc | 1 - changelog.d/13350.bugfix | 1 - changelog.d/13352.bugfix | 1 - changelog.d/13354.misc | 1 - changelog.d/13362.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 73 files changed, 95 insertions(+), 73 deletions(-) delete mode 100644 changelog.d/12942.misc delete mode 100644 changelog.d/12943.misc delete mode 100644 changelog.d/12967.removal delete mode 100644 changelog.d/13038.feature delete mode 100644 changelog.d/13094.misc delete mode 100644 changelog.d/13172.misc delete mode 100644 changelog.d/13175.misc delete mode 100644 changelog.d/13192.removal delete mode 100644 changelog.d/13198.misc delete mode 100644 changelog.d/13205.feature delete mode 100644 changelog.d/13208.feature delete mode 100644 changelog.d/13215.misc delete mode 100644 changelog.d/13218.misc delete mode 100644 changelog.d/13220.feature delete mode 100644 changelog.d/13224.misc delete mode 100644 changelog.d/13231.doc delete mode 100644 changelog.d/13233.doc delete mode 100644 changelog.d/13239.removal delete mode 100644 changelog.d/13240.misc delete mode 100644 changelog.d/13242.misc delete mode 100644 changelog.d/13251.misc delete mode 100644 changelog.d/13253.misc delete mode 100644 changelog.d/13254.misc delete mode 100644 changelog.d/13255.misc delete mode 100644 changelog.d/13257.misc delete mode 100644 changelog.d/13258.misc delete mode 100644 
changelog.d/13260.misc delete mode 100644 changelog.d/13261.doc delete mode 100644 changelog.d/13263.bugfix delete mode 100644 changelog.d/13266.misc delete mode 100644 changelog.d/13267.misc delete mode 100644 changelog.d/13270.bugfix delete mode 100644 changelog.d/13271.doc delete mode 100644 changelog.d/13274.misc delete mode 100644 changelog.d/13276.feature delete mode 100644 changelog.d/13278.bugfix delete mode 100644 changelog.d/13279.misc delete mode 100644 changelog.d/13281.misc delete mode 100644 changelog.d/13284.misc delete mode 100644 changelog.d/13285.misc delete mode 100644 changelog.d/13292.misc delete mode 100644 changelog.d/13296.bugfix delete mode 100644 changelog.d/13297.misc delete mode 100644 changelog.d/13299.misc delete mode 100644 changelog.d/13300.misc delete mode 100644 changelog.d/13303.misc delete mode 100644 changelog.d/13307.misc delete mode 100644 changelog.d/13308.misc delete mode 100644 changelog.d/13310.misc delete mode 100644 changelog.d/13311.misc delete mode 100644 changelog.d/13313.misc delete mode 100644 changelog.d/13314.doc delete mode 100644 changelog.d/13317.feature delete mode 100644 changelog.d/13318.misc delete mode 100644 changelog.d/13320.misc delete mode 100644 changelog.d/13323.misc delete mode 100644 changelog.d/13324.misc delete mode 100644 changelog.d/13326.removal delete mode 100644 changelog.d/13328.misc delete mode 100644 changelog.d/13329.misc delete mode 100644 changelog.d/13333.doc delete mode 100644 changelog.d/13338.doc delete mode 100644 changelog.d/13342.misc delete mode 100644 changelog.d/13344.doc delete mode 100644 changelog.d/13345.misc delete mode 100644 changelog.d/13349.misc delete mode 100644 changelog.d/13350.bugfix delete mode 100644 changelog.d/13352.bugfix delete mode 100644 changelog.d/13354.misc delete mode 100644 changelog.d/13362.misc diff --git a/CHANGES.md b/CHANGES.md index 1d123abc19..979e35370a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,9 +1,95 @@ -Synapse vNext -============= +Synapse 1.64.0rc1 (2022-07-26) +============================== As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://matrix-org.github.io/synapse/v1.64/upgrade.html#upgrading-to-v1640). +Features +-------- + +- Provide more info why we don't have any thumbnails to serve. ([\#13038](https://github.com/matrix-org/synapse/issues/13038)) +- Allow pagination from remote event after discovering it from MSC3030 `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205)) +- Add a `room_type` field in the responses for the list room and room details admin API. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208)) +- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220)) +- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttle additional joins if that rate grows too large. ([\#13276](https://github.com/matrix-org/synapse/issues/13276)) +- Support Implicit TLS for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. 
([\#13317](https://github.com/matrix-org/synapse/issues/13317)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the "enable_email_notifs" and "email_notifs_for_new_users" options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263)) +- Fix a bug introduced in Synapse 1.40 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270)) +- Fix long-standing bug where in rare instances Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278)) +- Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296)) +- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350)) +- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13352](https://github.com/matrix-org/synapse/issues/13352)) + + +Improved Documentation +---------------------- + +- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231)) +- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261)) +- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271)) +- Add notes when config options where changed. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314)) +- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333)) +- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338)) +- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344)) + + +Deprecations and Removals +------------------------- + +- Drop tables used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967)) +- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192)) +- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239)) +- Stop builindg `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326)) + + +Internal Changes +---------------- + +- Use lower isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942)) +- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943)) +- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. 
([\#13094](https://github.com/matrix-org/synapse/issues/13094)) +- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172)) +- Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175)) +- Refactor receipts servlet logic to avoid duplicated code. ([\#13198](https://github.com/matrix-org/synapse/issues/13198)) +- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215)) +- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218)) +- Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224)) +- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. ([\#13240](https://github.com/matrix-org/synapse/issues/13240)) +- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308)) +- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251)) +- Preparatory work for a per-room rate limiter on joins. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255)) +- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257)) +- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258)) +- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260)) +- Do not fail build if complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266)) +- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274)) +- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279)) +- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307)) +- Update locked version of `frozendict` to 2.3.2, which has a fix for a memory leak. ([\#13284](https://github.com/matrix-org/synapse/issues/13284)) +- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285)) +- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292)) +- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297)) +- Improve performance of query `_get_subset_users_in_room_with_profiles`.
([\#13299](https://github.com/matrix-org/synapse/issues/13299)) +- Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300)) +- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303)) +- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310)) +- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311)) +- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. ([\#13313](https://github.com/matrix-org/synapse/issues/13313)) +- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318)) +- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320)) +- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323)) +- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324)) +- Add missing type hints to open tracing module. ([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362)) +- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349)) +- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342)) +- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354)) + + Synapse 1.63.1 (2022-07-20) =========================== diff --git a/changelog.d/12942.misc b/changelog.d/12942.misc deleted file mode 100644 index acb2558d57..0000000000 --- a/changelog.d/12942.misc +++ /dev/null @@ -1 +0,0 @@ -Use lower isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. diff --git a/changelog.d/12943.misc b/changelog.d/12943.misc deleted file mode 100644 index f66bb3ec32..0000000000 --- a/changelog.d/12943.misc +++ /dev/null @@ -1 +0,0 @@ -Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. diff --git a/changelog.d/12967.removal b/changelog.d/12967.removal deleted file mode 100644 index 0aafd6a4d9..0000000000 --- a/changelog.d/12967.removal +++ /dev/null @@ -1 +0,0 @@ -Drop tables used for groups/communities. diff --git a/changelog.d/13038.feature b/changelog.d/13038.feature deleted file mode 100644 index 1278f1b4e9..0000000000 --- a/changelog.d/13038.feature +++ /dev/null @@ -1 +0,0 @@ -Provide more info why we don't have any thumbnails to serve. 
diff --git a/changelog.d/13094.misc b/changelog.d/13094.misc deleted file mode 100644 index f1e55ae476..0000000000 --- a/changelog.d/13094.misc +++ /dev/null @@ -1 +0,0 @@ -Make the AS login method call `Auth.get_user_by_req` for checking the AS token. diff --git a/changelog.d/13172.misc b/changelog.d/13172.misc deleted file mode 100644 index 124a1b3662..0000000000 --- a/changelog.d/13172.misc +++ /dev/null @@ -1 +0,0 @@ -Always use a version of canonicaljson that supports the C implementation of frozendict. diff --git a/changelog.d/13175.misc b/changelog.d/13175.misc deleted file mode 100644 index f273b3d6ca..0000000000 --- a/changelog.d/13175.misc +++ /dev/null @@ -1 +0,0 @@ -Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. diff --git a/changelog.d/13192.removal b/changelog.d/13192.removal deleted file mode 100644 index a7dffd1c48..0000000000 --- a/changelog.d/13192.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for delegating email verification to an external server. diff --git a/changelog.d/13198.misc b/changelog.d/13198.misc deleted file mode 100644 index 5aef2432df..0000000000 --- a/changelog.d/13198.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor receipts servlet logic to avoid duplicated code. diff --git a/changelog.d/13205.feature b/changelog.d/13205.feature deleted file mode 100644 index d89aa9aa75..0000000000 --- a/changelog.d/13205.feature +++ /dev/null @@ -1 +0,0 @@ -Allow pagination from remote event after discovering it from MSC3030 `/timestamp_to_event`. diff --git a/changelog.d/13208.feature b/changelog.d/13208.feature deleted file mode 100644 index b0c5f090ee..0000000000 --- a/changelog.d/13208.feature +++ /dev/null @@ -1 +0,0 @@ -Add a `room_type` field in the responses for the list room and room details admin API. Contributed by @andrewdoh. \ No newline at end of file diff --git a/changelog.d/13215.misc b/changelog.d/13215.misc deleted file mode 100644 index 3da35addb3..0000000000 --- a/changelog.d/13215.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. diff --git a/changelog.d/13218.misc b/changelog.d/13218.misc deleted file mode 100644 index b1c8e5c747..0000000000 --- a/changelog.d/13218.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused database table `event_reference_hashes`. diff --git a/changelog.d/13220.feature b/changelog.d/13220.feature deleted file mode 100644 index 9b0240fdc8..0000000000 --- a/changelog.d/13220.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for room version 10. diff --git a/changelog.d/13224.misc b/changelog.d/13224.misc deleted file mode 100644 index 41f8693b74..0000000000 --- a/changelog.d/13224.misc +++ /dev/null @@ -1 +0,0 @@ -Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13231.doc b/changelog.d/13231.doc deleted file mode 100644 index e750f9da49..0000000000 --- a/changelog.d/13231.doc +++ /dev/null @@ -1 +0,0 @@ -Provide an example of using the Admin API. Contributed by @jejo86. diff --git a/changelog.d/13233.doc b/changelog.d/13233.doc deleted file mode 100644 index 3eaea7c5e3..0000000000 --- a/changelog.d/13233.doc +++ /dev/null @@ -1 +0,0 @@ -Move the documentation for how URL previews work to the URL preview module. 
diff --git a/changelog.d/13239.removal b/changelog.d/13239.removal deleted file mode 100644 index 8f6045176d..0000000000 --- a/changelog.d/13239.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. \ No newline at end of file diff --git a/changelog.d/13240.misc b/changelog.d/13240.misc deleted file mode 100644 index 0567e47d64..0000000000 --- a/changelog.d/13240.misc +++ /dev/null @@ -1 +0,0 @@ -Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. \ No newline at end of file diff --git a/changelog.d/13242.misc b/changelog.d/13242.misc deleted file mode 100644 index 7f8ec0815f..0000000000 --- a/changelog.d/13242.misc +++ /dev/null @@ -1 +0,0 @@ -Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13251.misc b/changelog.d/13251.misc deleted file mode 100644 index 526369e403..0000000000 --- a/changelog.d/13251.misc +++ /dev/null @@ -1 +0,0 @@ -Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13253.misc b/changelog.d/13253.misc deleted file mode 100644 index cba6b9ee0f..0000000000 --- a/changelog.d/13253.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for a per-room rate limiter on joins. diff --git a/changelog.d/13254.misc b/changelog.d/13254.misc deleted file mode 100644 index cba6b9ee0f..0000000000 --- a/changelog.d/13254.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for a per-room rate limiter on joins. diff --git a/changelog.d/13255.misc b/changelog.d/13255.misc deleted file mode 100644 index cba6b9ee0f..0000000000 --- a/changelog.d/13255.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for a per-room rate limiter on joins. diff --git a/changelog.d/13257.misc b/changelog.d/13257.misc deleted file mode 100644 index 5fc1388520..0000000000 --- a/changelog.d/13257.misc +++ /dev/null @@ -1 +0,0 @@ -Log the stack when waiting for an entire room to be un-partial stated. diff --git a/changelog.d/13258.misc b/changelog.d/13258.misc deleted file mode 100644 index a187c46aa6..0000000000 --- a/changelog.d/13258.misc +++ /dev/null @@ -1 +0,0 @@ -Fix spurious warning when fetching state after a missing prev event. diff --git a/changelog.d/13260.misc b/changelog.d/13260.misc deleted file mode 100644 index b55ff32c76..0000000000 --- a/changelog.d/13260.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up tests for notifications. diff --git a/changelog.d/13261.doc b/changelog.d/13261.doc deleted file mode 100644 index 3eaea7c5e3..0000000000 --- a/changelog.d/13261.doc +++ /dev/null @@ -1 +0,0 @@ -Move the documentation for how URL previews work to the URL preview module. diff --git a/changelog.d/13263.bugfix b/changelog.d/13263.bugfix deleted file mode 100644 index 91e1d1e7eb..0000000000 --- a/changelog.d/13263.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the "enable_email_notifs" and "email_notifs_for_new_users" options were enabled. Contributed by @thomasweston12. diff --git a/changelog.d/13266.misc b/changelog.d/13266.misc deleted file mode 100644 index d583acb81b..0000000000 --- a/changelog.d/13266.misc +++ /dev/null @@ -1 +0,0 @@ -Do not fail build if complement with workers fails. 
diff --git a/changelog.d/13267.misc b/changelog.d/13267.misc deleted file mode 100644 index a334414320..0000000000 --- a/changelog.d/13267.misc +++ /dev/null @@ -1 +0,0 @@ -Don't pull out state in `compute_event_context` for unconflicted state. diff --git a/changelog.d/13270.bugfix b/changelog.d/13270.bugfix deleted file mode 100644 index d023b25eea..0000000000 --- a/changelog.d/13270.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.40 where a user invited to a restricted room would be briefly unable to join. diff --git a/changelog.d/13271.doc b/changelog.d/13271.doc deleted file mode 100644 index b50e60d029..0000000000 --- a/changelog.d/13271.doc +++ /dev/null @@ -1 +0,0 @@ -Add another `contrib` script to help set up worker processes. Contributed by @villepeh. diff --git a/changelog.d/13274.misc b/changelog.d/13274.misc deleted file mode 100644 index a334414320..0000000000 --- a/changelog.d/13274.misc +++ /dev/null @@ -1 +0,0 @@ -Don't pull out state in `compute_event_context` for unconflicted state. diff --git a/changelog.d/13276.feature b/changelog.d/13276.feature deleted file mode 100644 index 068d158ed5..0000000000 --- a/changelog.d/13276.feature +++ /dev/null @@ -1 +0,0 @@ -Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttle additional joins if that rate grows too large. diff --git a/changelog.d/13278.bugfix b/changelog.d/13278.bugfix deleted file mode 100644 index 49e9377c79..0000000000 --- a/changelog.d/13278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where in rare instances Synapse could store the incorrect state for a room after a state resolution. diff --git a/changelog.d/13279.misc b/changelog.d/13279.misc deleted file mode 100644 index a083d2af2a..0000000000 --- a/changelog.d/13279.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the rebuild time for the complement-synapse docker image. diff --git a/changelog.d/13281.misc b/changelog.d/13281.misc deleted file mode 100644 index dea51d1362..0000000000 --- a/changelog.d/13281.misc +++ /dev/null @@ -1 +0,0 @@ -Don't pull out the full state when creating an event. diff --git a/changelog.d/13284.misc b/changelog.d/13284.misc deleted file mode 100644 index fa9743a10e..0000000000 --- a/changelog.d/13284.misc +++ /dev/null @@ -1 +0,0 @@ -Update locked version of `frozendict` to 2.3.2, which has a fix for a memory leak. diff --git a/changelog.d/13285.misc b/changelog.d/13285.misc deleted file mode 100644 index b7bcbadb5b..0000000000 --- a/changelog.d/13285.misc +++ /dev/null @@ -1 +0,0 @@ -Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. diff --git a/changelog.d/13292.misc b/changelog.d/13292.misc deleted file mode 100644 index 67fec55330..0000000000 --- a/changelog.d/13292.misc +++ /dev/null @@ -1 +0,0 @@ -Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. diff --git a/changelog.d/13296.bugfix b/changelog.d/13296.bugfix deleted file mode 100644 index ff0eb2b4a1..0000000000 --- a/changelog.d/13296.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. diff --git a/changelog.d/13297.misc b/changelog.d/13297.misc deleted file mode 100644 index 545a62369f..0000000000 --- a/changelog.d/13297.misc +++ /dev/null @@ -1 +0,0 @@ -Use `HTTPStatus` constants in place of literals in tests.
\ No newline at end of file diff --git a/changelog.d/13299.misc b/changelog.d/13299.misc deleted file mode 100644 index a9d5566873..0000000000 --- a/changelog.d/13299.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of query `_get_subset_users_in_room_with_profiles`. diff --git a/changelog.d/13300.misc b/changelog.d/13300.misc deleted file mode 100644 index ee58add3c4..0000000000 --- a/changelog.d/13300.misc +++ /dev/null @@ -1 +0,0 @@ -Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. diff --git a/changelog.d/13303.misc b/changelog.d/13303.misc deleted file mode 100644 index 03f64ab171..0000000000 --- a/changelog.d/13303.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary `json.dumps` from tests. \ No newline at end of file diff --git a/changelog.d/13307.misc b/changelog.d/13307.misc deleted file mode 100644 index 45b628ce13..0000000000 --- a/changelog.d/13307.misc +++ /dev/null @@ -1 +0,0 @@ -Don't pull out the full state when creating an event. \ No newline at end of file diff --git a/changelog.d/13308.misc b/changelog.d/13308.misc deleted file mode 100644 index 7f8ec0815f..0000000000 --- a/changelog.d/13308.misc +++ /dev/null @@ -1 +0,0 @@ -Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13310.misc b/changelog.d/13310.misc deleted file mode 100644 index eaf570e058..0000000000 --- a/changelog.d/13310.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory usage of sending dummy events. diff --git a/changelog.d/13311.misc b/changelog.d/13311.misc deleted file mode 100644 index 4be81c675c..0000000000 --- a/changelog.d/13311.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. \ No newline at end of file diff --git a/changelog.d/13313.misc b/changelog.d/13313.misc deleted file mode 100644 index 0f3c1f0afd..0000000000 --- a/changelog.d/13313.misc +++ /dev/null @@ -1 +0,0 @@ -Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. diff --git a/changelog.d/13314.doc b/changelog.d/13314.doc deleted file mode 100644 index 75c71ef27a..0000000000 --- a/changelog.d/13314.doc +++ /dev/null @@ -1 +0,0 @@ -Add notes when config options where changed. Contributed by @behrmann. diff --git a/changelog.d/13317.feature b/changelog.d/13317.feature deleted file mode 100644 index e0ebd2b51f..0000000000 --- a/changelog.d/13317.feature +++ /dev/null @@ -1 +0,0 @@ -Support Implicit TLS for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. diff --git a/changelog.d/13318.misc b/changelog.d/13318.misc deleted file mode 100644 index f5cd26b862..0000000000 --- a/changelog.d/13318.misc +++ /dev/null @@ -1 +0,0 @@ -Validate federation destinations and log an error if a destination is invalid. diff --git a/changelog.d/13320.misc b/changelog.d/13320.misc deleted file mode 100644 index d33cf3a25a..0000000000 --- a/changelog.d/13320.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. diff --git a/changelog.d/13323.misc b/changelog.d/13323.misc deleted file mode 100644 index 3caa94a2f6..0000000000 --- a/changelog.d/13323.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory usage of state caches. 
diff --git a/changelog.d/13324.misc b/changelog.d/13324.misc deleted file mode 100644 index 30670cf56c..0000000000 --- a/changelog.d/13324.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the amount of state we store in the `state_cache`. diff --git a/changelog.d/13326.removal b/changelog.d/13326.removal deleted file mode 100644 index 8112286671..0000000000 --- a/changelog.d/13326.removal +++ /dev/null @@ -1 +0,0 @@ -Stop builindg `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. diff --git a/changelog.d/13328.misc b/changelog.d/13328.misc deleted file mode 100644 index c80578ce95..0000000000 --- a/changelog.d/13328.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to open tracing module. diff --git a/changelog.d/13329.misc b/changelog.d/13329.misc deleted file mode 100644 index 4df9a9f6d7..0000000000 --- a/changelog.d/13329.misc +++ /dev/null @@ -1 +0,0 @@ -Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13333.doc b/changelog.d/13333.doc deleted file mode 100644 index 57cbdf05c8..0000000000 --- a/changelog.d/13333.doc +++ /dev/null @@ -1 +0,0 @@ -Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. \ No newline at end of file diff --git a/changelog.d/13338.doc b/changelog.d/13338.doc deleted file mode 100644 index 7acf6d3f34..0000000000 --- a/changelog.d/13338.doc +++ /dev/null @@ -1 +0,0 @@ -Mention that BuildKit is needed when building Docker images for tests. diff --git a/changelog.d/13342.misc b/changelog.d/13342.misc deleted file mode 100644 index ce9c816b9c..0000000000 --- a/changelog.d/13342.misc +++ /dev/null @@ -1 +0,0 @@ -When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. diff --git a/changelog.d/13344.doc b/changelog.d/13344.doc deleted file mode 100644 index fca187df92..0000000000 --- a/changelog.d/13344.doc +++ /dev/null @@ -1 +0,0 @@ -Improve Caddy reverse proxy documentation. diff --git a/changelog.d/13345.misc b/changelog.d/13345.misc deleted file mode 100644 index c80578ce95..0000000000 --- a/changelog.d/13345.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to open tracing module. diff --git a/changelog.d/13349.misc b/changelog.d/13349.misc deleted file mode 100644 index 4df9a9f6d7..0000000000 --- a/changelog.d/13349.misc +++ /dev/null @@ -1 +0,0 @@ -Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13350.bugfix b/changelog.d/13350.bugfix deleted file mode 100644 index 46c496a6a0..0000000000 --- a/changelog.d/13350.bugfix +++ /dev/null @@ -1 +0,0 @@ -Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. diff --git a/changelog.d/13352.bugfix b/changelog.d/13352.bugfix deleted file mode 100644 index 8128714299..0000000000 --- a/changelog.d/13352.bugfix +++ /dev/null @@ -1 +0,0 @@ -Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. diff --git a/changelog.d/13354.misc b/changelog.d/13354.misc deleted file mode 100644 index e08ee7866a..0000000000 --- a/changelog.d/13354.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. 
diff --git a/changelog.d/13362.misc b/changelog.d/13362.misc deleted file mode 100644 index c80578ce95..0000000000 --- a/changelog.d/13362.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to open tracing module. diff --git a/debian/changelog b/debian/changelog index 9417f8714f..38030a5e19 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.64.0~rc1) stable; urgency=medium + + * New Synapse release 1.64.0rc1. + + -- Synapse Packaging team Tue, 26 Jul 2022 12:11:49 +0100 + matrix-synapse-py3 (1.63.1) stable; urgency=medium * New Synapse release 1.63.1. diff --git a/pyproject.toml b/pyproject.toml index 4da1331c93..c707737590 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.63.1" +version = "1.64.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 8b603299bf2d15bef1db867e08ac90c0f753cc32 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 26 Jul 2022 07:19:20 -0400 Subject: Remove unused argument for get_relations_for_event. (#13383) --- changelog.d/13383.misc | 1 + synapse/handlers/relations.py | 3 --- synapse/storage/databases/main/relations.py | 6 ------ 3 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 changelog.d/13383.misc diff --git a/changelog.d/13383.misc b/changelog.d/13383.misc new file mode 100644 index 0000000000..2236eced24 --- /dev/null +++ b/changelog.d/13383.misc @@ -0,0 +1 @@ +Remove an unused argument to `get_relations_for_event`. diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 0b63cd2186..8f797e3ae9 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -73,7 +73,6 @@ class RelationsHandler: room_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, - aggregation_key: Optional[str] = None, limit: int = 5, direction: str = "b", from_token: Optional[StreamToken] = None, @@ -89,7 +88,6 @@ class RelationsHandler: room_id: The room the event belongs to. relation_type: Only fetch events with this relation type, if given. event_type: Only fetch events with this event type, if given. - aggregation_key: Only fetch events with this aggregation key, if given. limit: Only fetch the most recent `limit` events. direction: Whether to fetch the most recent first (`"b"`) or the oldest first (`"f"`). @@ -122,7 +120,6 @@ class RelationsHandler: room_id=room_id, relation_type=relation_type, event_type=event_type, - aggregation_key=aggregation_key, limit=limit, direction=direction, from_token=from_token, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index b457bc189e..7bd27790eb 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -62,7 +62,6 @@ class RelationsWorkerStore(SQLBaseStore): room_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, - aggregation_key: Optional[str] = None, limit: int = 5, direction: str = "b", from_token: Optional[StreamToken] = None, @@ -76,7 +75,6 @@ class RelationsWorkerStore(SQLBaseStore): room_id: The room the event belongs to. relation_type: Only fetch events with this relation type, if given. event_type: Only fetch events with this event type, if given. - aggregation_key: Only fetch events with this aggregation key, if given. 
limit: Only fetch the most recent `limit` events. direction: Whether to fetch the most recent first (`"b"`) or the oldest first (`"f"`). @@ -105,10 +103,6 @@ class RelationsWorkerStore(SQLBaseStore): where_clause.append("type = ?") where_args.append(event_type) - if aggregation_key: - where_clause.append("aggregation_key = ?") - where_args.append(aggregation_key) - pagination_clause = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), -- cgit 1.4.1 From f765a40f693bf492af3beb3d7945452142fa46a0 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 26 Jul 2022 12:26:36 +0100 Subject: Tweak changelog --- CHANGES.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 979e35370a..5623665cbb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,24 +3,26 @@ Synapse 1.64.0rc1 (2022-07-26) As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://matrix-org.github.io/synapse/v1.64/upgrade.html#upgrading-to-v1640). +We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu. + Features -------- - Provide more info why we don't have any thumbnails to serve. ([\#13038](https://github.com/matrix-org/synapse/issues/13038)) - Allow pagination from remote event after discovering it from MSC3030 `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205)) -- Add a `room_type` field in the responses for the list room and room details admin API. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208)) +- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208)) - Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220)) -- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttle additional joins if that rate grows too large. ([\#13276](https://github.com/matrix-org/synapse/issues/13276)) +- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13276](https://github.com/matrix-org/synapse/issues/13276)) - Support Implicit TLS for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317)) Bugfixes -------- -- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the "enable_email_notifs" and "email_notifs_for_new_users" options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263)) -- Fix a bug introduced in Synapse 1.40 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270)) -- Fix long-standing bug where in rare instances Synapse could store the incorrect state for a room after a state resolution. 
([\#13278](https://github.com/matrix-org/synapse/issues/13278)) +- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263)) +- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270)) +- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278)) - Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296)) - Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350)) - Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13352](https://github.com/matrix-org/synapse/issues/13352)) @@ -32,7 +34,7 @@ Improved Documentation - Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231)) - Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261)) - Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271)) -- Add notes when config options where changed. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314)) +- Add notes when config options were changed. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314)) - Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333)) - Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338)) - Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344)) @@ -41,16 +43,16 @@ Improved Documentation Deprecations and Removals ------------------------- -- Drop tables used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967)) +- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967)) - Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192)) - Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239)) -- Stop builindg `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326)) +- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. 
([\#13326](https://github.com/matrix-org/synapse/issues/13326)) Internal Changes ---------------- -- Use lower isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942)) +- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942)) - Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943)) - Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094)) - Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172)) -- cgit 1.4.1 From 335ebb21ccc0ae906169f21dcfc456c869bdd301 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 26 Jul 2022 12:39:23 +0100 Subject: Faster room joins: avoid blocking when pulling events with missing prevs (#13355) Avoid blocking on full state in `_resolve_state_at_missing_prevs` and return a new flag indicating whether the resolved state is partial. Thread that flag around so that it makes it into the event context. Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/13355.misc | 1 + synapse/handlers/federation_event.py | 116 +++++++++++++++++++++++++++-------- synapse/handlers/message.py | 4 ++ synapse/state/__init__.py | 18 ++++-- synapse/storage/controllers/state.py | 8 ++- tests/handlers/test_federation.py | 1 + tests/storage/test_events.py | 7 ++- tests/test_state.py | 2 + 8 files changed, 124 insertions(+), 33 deletions(-) create mode 100644 changelog.d/13355.misc diff --git a/changelog.d/13355.misc b/changelog.d/13355.misc new file mode 100644 index 0000000000..7715075885 --- /dev/null +++ b/changelog.d/13355.misc @@ -0,0 +1 @@ +Faster room joins: avoid blocking when pulling events with partially missing prev events. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 16f20c8be7..fc1254d2ad 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -278,7 +278,9 @@ class FederationEventHandler: ) try: - await self._process_received_pdu(origin, pdu, state_ids=None) + await self._process_received_pdu( + origin, pdu, state_ids=None, partial_state=None + ) except PartialStateConflictError: # The room was un-partial stated while we were processing the PDU. # Try once more, with full state this time. @@ -286,7 +288,9 @@ class FederationEventHandler: "Room %s was un-partial stated while processing the PDU, trying again.", room_id, ) - await self._process_received_pdu(origin, pdu, state_ids=None) + await self._process_received_pdu( + origin, pdu, state_ids=None, partial_state=None + ) async def on_send_membership_event( self, origin: str, event: EventBase @@ -534,14 +538,36 @@ class FederationEventHandler: # # This is the same operation as we do when we receive a regular event # over federation. 
- state_ids = await self._resolve_state_at_missing_prevs(destination, event) - - # build a new state group for it if need be - context = await self._state_handler.compute_event_context( - event, - state_ids_before_event=state_ids, + state_ids, partial_state = await self._resolve_state_at_missing_prevs( + destination, event ) - if context.partial_state: + + # There are three possible cases for (state_ids, partial_state): + # * `state_ids` and `partial_state` are both `None` if we had all the + # prev_events. The prev_events may or may not have partial state and + # we won't know until we compute the event context. + # * `state_ids` is not `None` and `partial_state` is `False` if we were + # missing some prev_events (but we have full state for any we did + # have). We calculated the full state after the prev_events. + # * `state_ids` is not `None` and `partial_state` is `True` if we were + # missing some, but not all, prev_events. At least one of the + # prev_events we did have had partial state, so we calculated a partial + # state after the prev_events. + + context = None + if state_ids is not None and partial_state: + # the state after the prev events is still partial. We can't de-partial + # state the event, so don't bother building the event context. + pass + else: + # build a new state group for it if need be + context = await self._state_handler.compute_event_context( + event, + state_ids_before_event=state_ids, + partial_state=partial_state, + ) + + if context is None or context.partial_state: # this can happen if some or all of the event's prev_events still have # partial state - ie, an event has an earlier stream_ordering than one # or more of its prev_events, so we de-partial-state it before its @@ -806,14 +832,39 @@ return try: - state_ids = await self._resolve_state_at_missing_prevs(origin, event) - # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does - # not return partial state - # https://github.com/matrix-org/synapse/issues/13002 + try: + state_ids, partial_state = await self._resolve_state_at_missing_prevs( + origin, event + ) + await self._process_received_pdu( + origin, + event, + state_ids=state_ids, + partial_state=partial_state, + backfilled=backfilled, + ) + except PartialStateConflictError: + # The room was un-partial stated while we were processing the event. + # Try once more, with full state this time. + state_ids, partial_state = await self._resolve_state_at_missing_prevs( + origin, event + ) - await self._process_received_pdu( - origin, event, state_ids=state_ids, backfilled=backfilled - ) + # We ought to have full state now, barring some unlikely race where we left and + # rejoined the room in the background. + if state_ids is not None and partial_state: + raise AssertionError( + f"Event {event.event_id} still has a partial resolved state " + f"after room {event.room_id} was un-partial stated" + ) + + await self._process_received_pdu( + origin, + event, + state_ids=state_ids, + partial_state=partial_state, + backfilled=backfilled, + ) except FederationError as e: if e.code == 403: logger.warning("Pulled event %s failed history check.", event_id) @@ -822,7 +873,7 @@ async def _resolve_state_at_missing_prevs( self, dest: str, event: EventBase - ) -> Optional[StateMap[str]]: + ) -> Tuple[Optional[StateMap[str]], Optional[bool]]: """Calculate the state at an event with missing prev_events.
This is used when we have pulled a batch of events from a remote server, and @@ -849,8 +900,10 @@ class FederationEventHandler: event: an event to check for missing prevs. Returns: - if we already had all the prev events, `None`. Otherwise, returns - the event ids of the state at `event`. + if we already had all the prev events, `None, None`. Otherwise, returns a + tuple containing: + * the event ids of the state at `event`. + * a boolean indicating whether the state may be partial. Raises: FederationError if we fail to get the state from the remote server after any @@ -864,7 +917,7 @@ class FederationEventHandler: missing_prevs = prevs - seen if not missing_prevs: - return None + return None, None logger.info( "Event %s is missing prev_events %s: calculating state for a " @@ -876,9 +929,15 @@ class FederationEventHandler: # resolve them to find the correct state at the current event. try: + # Determine whether we may be about to retrieve partial state + # Events may be un-partial stated right after we compute the partial state + # flag, but that's okay, as long as the flag errs on the conservative side. + partial_state_flags = await self._store.get_partial_state_events(seen) + partial_state = any(partial_state_flags.values()) + # Get the state of the events we know about ours = await self._state_storage_controller.get_state_groups_ids( - room_id, seen + room_id, seen, await_full_state=False ) # state_maps is a list of mappings from (type, state_key) to event_id @@ -924,7 +983,7 @@ class FederationEventHandler: "We can't get valid state history.", affected=event_id, ) - return state_map + return state_map, partial_state async def _get_state_ids_after_missing_prev_event( self, @@ -1094,6 +1153,7 @@ class FederationEventHandler: origin: str, event: EventBase, state_ids: Optional[StateMap[str]], + partial_state: Optional[bool], backfilled: bool = False, ) -> None: """Called when we have a new non-outlier event. @@ -1117,14 +1177,21 @@ class FederationEventHandler: state_ids: Normally None, but if we are handling a gap in the graph (ie, we are missing one or more prev_events), the resolved state at the - event. Must not be partial state. + event + + partial_state: + `True` if `state_ids` is partial and omits non-critical membership + events. + `False` if `state_ids` is the full state. + `None` if `state_ids` is not provided. In this case, the flag will be + calculated based on `event`'s prev events. backfilled: True if this is part of a historical batch of events (inhibits notification to clients, and validation of device keys.) PartialStateConflictError: if the room was un-partial stated in between computing the state at the event and persisting it. The caller should retry - exactly once in this case. Will never be raised if `state_ids` is provided. + exactly once in this case. 
""" logger.debug("Processing event: %s", event) assert not event.internal_metadata.outlier @@ -1132,6 +1199,7 @@ class FederationEventHandler: context = await self._state_handler.compute_event_context( event, state_ids_before_event=state_ids, + partial_state=partial_state, ) try: await self._check_event_auth(origin, event, context) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index bd7baef051..e0bcc40b93 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1135,6 +1135,10 @@ class EventCreationHandler: context = await self.state.compute_event_context( event, state_ids_before_event=state_map_for_event, + # TODO(faster_joins): check how MSC2716 works and whether we can have + # partial state here + # https://github.com/matrix-org/synapse/issues/13003 + partial_state=False, ) else: context = await self.state.compute_event_context(event) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 87ccd52f0a..69834de0de 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -255,7 +255,7 @@ class StateHandler: self, event: EventBase, state_ids_before_event: Optional[StateMap[str]] = None, - partial_state: bool = False, + partial_state: Optional[bool] = None, ) -> EventContext: """Build an EventContext structure for a non-outlier event. @@ -270,8 +270,12 @@ class StateHandler: it can't be calculated from existing events. This is normally only specified when receiving an event from federation where we don't have the prev events, e.g. when backfilling. - partial_state: True if `state_ids_before_event` is partial and omits - non-critical membership events + partial_state: + `True` if `state_ids_before_event` is partial and omits non-critical + membership events. + `False` if `state_ids_before_event` is the full state. + `None` when `state_ids_before_event` is not provided. In this case, the + flag will be calculated based on `event`'s prev events. Returns: The event context. """ @@ -298,12 +302,14 @@ class StateHandler: ) ) + # the partial_state flag must be provided + assert partial_state is not None else: # otherwise, we'll need to resolve the state across the prev_events. # partial_state should not be set explicitly in this case: # we work it out dynamically - assert not partial_state + assert partial_state is None # if any of the prev-events have partial state, so do we. 
# (This is slightly racy - the prev-events might get fixed up before we use @@ -313,13 +319,13 @@ class StateHandler: incomplete_prev_events = await self.store.get_partial_state_events( prev_event_ids ) - if any(incomplete_prev_events.values()): + partial_state = any(incomplete_prev_events.values()) + if partial_state: logger.debug( "New/incoming event %s refers to prev_events %s with partial state", event.event_id, [k for (k, v) in incomplete_prev_events.items() if v], ) - partial_state = True logger.debug("calling resolve_state_groups from compute_event_context") # we've already taken into account partial state, so no need to wait for diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index e08f956e6e..20805c94fa 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -82,13 +82,15 @@ class StateStorageController: return state_group_delta.prev_group, state_group_delta.delta_ids async def get_state_groups_ids( - self, _room_id: str, event_ids: Collection[str] + self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True ) -> Dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events Args: _room_id: id of the room for these events event_ids: ids of the events + await_full_state: if `True`, will block if we do not yet have complete + state at these events. Returns: dict of state_group_id -> (dict of (type, state_key) -> event id) @@ -100,7 +102,9 @@ class StateStorageController: if not event_ids: return {} - event_to_groups = await self.get_state_group_for_events(event_ids) + event_to_groups = await self.get_state_group_for_events( + event_ids, await_full_state=await_full_state + ) groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups(groups) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 8a0bb91f40..fb06e5e812 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -287,6 +287,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): state_ids={ (e.type, e.state_key): e.event_id for e in current_state }, + partial_state=False, ) ) diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 2ff88e64a5..3ce4f35cb7 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -70,7 +70,11 @@ class ExtremPruneTestCase(HomeserverTestCase): def persist_event(self, event, state=None): """Persist the event, with optional state""" context = self.get_success( - self.state.compute_event_context(event, state_ids_before_event=state) + self.state.compute_event_context( + event, + state_ids_before_event=state, + partial_state=None if state is None else False, + ) ) self.get_success(self._persistence.persist_event(event, context)) @@ -148,6 +152,7 @@ class ExtremPruneTestCase(HomeserverTestCase): self.state.compute_event_context( remote_event_2, state_ids_before_event=state_before_gap, + partial_state=False, ) ) diff --git a/tests/test_state.py b/tests/test_state.py index bafd6d1750..504530b49a 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -462,6 +462,7 @@ class StateTestCase(unittest.TestCase): state_ids_before_event={ (e.type, e.state_key): e.event_id for e in old_state }, + partial_state=False, ) ) @@ -492,6 +493,7 @@ class StateTestCase(unittest.TestCase): state_ids_before_event={ (e.type, e.state_key): e.event_id for e in old_state }, + partial_state=False, ) ) -- 
cgit 1.4.1 From 5d7e2b01957e9fc73722b388d4a0253ba54fb9ec Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 26 Jul 2022 12:45:19 +0100 Subject: Tweak changelog in response to review --- CHANGES.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5623665cbb..3462ff5524 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,11 +9,11 @@ We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longe Features -------- -- Provide more info why we don't have any thumbnails to serve. ([\#13038](https://github.com/matrix-org/synapse/issues/13038)) -- Allow pagination from remote event after discovering it from MSC3030 `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205)) +- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038)) +- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205)) - Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208)) - Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220)) -- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13276](https://github.com/matrix-org/synapse/issues/13276)) +- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276)) - Support Implicit TLS for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317)) @@ -25,7 +25,7 @@ Bugfixes - Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278)) - Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296)) - Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350)) -- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13352](https://github.com/matrix-org/synapse/issues/13352)) +- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352)) Improved Documentation @@ -34,7 +34,7 @@ Improved Documentation - Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231)) - Move the documentation for how URL previews work to the URL preview module. 
([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261)) - Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271)) -- Add notes when config options were changed. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314)) +- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314)) - Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333)) - Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338)) - Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344)) @@ -61,10 +61,9 @@ Internal Changes - Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215)) - Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218)) - Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224)) -- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. ([\#13240](https://github.com/matrix-org/synapse/issues/13240)) +- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240)) - Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308)) - Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251)) -- Preparatory work for a per-room rate limiter on joins. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255)) - Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257)) - Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258)) - Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260)) @@ -72,8 +71,7 @@ Internal Changes - Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274)) - Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279)) - Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307)) -- Update locked version of `frozendict` to 2.3.2, which has a fix for a memory leak. 
([\#13284](https://github.com/matrix-org/synapse/issues/13284)) -- Upgrade from Poetry 1.1.14 to 1.1.12, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285)) +- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285)) - Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292)) - Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297)) - Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299)) -- cgit 1.4.1 From ca3db044a3b5a207ff8d65ad7b761427ab215ccc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 26 Jul 2022 12:47:31 +0100 Subject: Fix infinite loop in partial-state resync (#13353) Make sure that we only pull out events from the db once they have no prev-events with partial state. --- changelog.d/13353.bugfix | 1 + synapse/handlers/federation_event.py | 14 +++++++------- synapse/storage/databases/main/events_worker.py | 20 +++++++++++++++++++- 3 files changed, 27 insertions(+), 8 deletions(-) create mode 100644 changelog.d/13353.bugfix diff --git a/changelog.d/13353.bugfix b/changelog.d/13353.bugfix new file mode 100644 index 0000000000..8e18bfae1f --- /dev/null +++ b/changelog.d/13353.bugfix @@ -0,0 +1 @@ +Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index fc1254d2ad..2ba2b1527e 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -569,15 +569,15 @@ class FederationEventHandler: if context is None or context.partial_state: # this can happen if some or all of the event's prev_events still have - # partial state - ie, an event has an earlier stream_ordering than one - # or more of its prev_events, so we de-partial-state it before its - # prev_events. + # partial state. We were careful to only pick events from the db without + # partial-state prev events, so that implies that a prev event has + # been persisted (with partial state) since we did the query. # - # TODO(faster_joins): we probably need to be more intelligent, and - # exclude partial-state prev_events from consideration - # https://github.com/matrix-org/synapse/issues/13001 + # So, let's just ignore `event` for now; when we re-run the db query + # we should instead get its partial-state prev event, which we will + # de-partial-state, and then come back to event. 
logger.warning( - "%s still has partial state: can't de-partial-state it yet", + "%s still has prev_events with partial state: can't de-partial-state it yet", event.event_id, ) return diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 5914a35420..29c99c6357 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -2110,11 +2110,29 @@ class EventsWorkerStore(SQLBaseStore): def _get_partial_state_events_batch_txn( txn: LoggingTransaction, room_id: str ) -> List[str]: + # we want to work through the events from oldest to newest, so + # we only want events whose prev_events do *not* have partial state - hence + # the 'NOT EXISTS' clause in the below. + # + # This is necessary because ordering by stream ordering isn't quite enough + # to ensure that we work from oldest to newest event (in particular, + # if an event is initially persisted as an outlier and later de-outliered, + # it can end up with a lower stream_ordering than its prev_events). + # + # Typically this means we'll only return one event per batch, but that's + # hard to do much about. + # + # See also: https://github.com/matrix-org/synapse/issues/13001 txn.execute( """ SELECT event_id FROM partial_state_events AS pse JOIN events USING (event_id) - WHERE pse.room_id = ? + WHERE pse.room_id = ? AND + NOT EXISTS( + SELECT 1 FROM event_edges AS ee + JOIN partial_state_events AS prev_pse ON (prev_pse.event_id=ee.prev_event_id) + WHERE ee.event_id=pse.event_id + ) ORDER BY events.stream_ordering LIMIT 100 """, -- cgit 1.4.1 From 33788a07ee23dbc4355fc1e5ec43a572342a5d24 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 26 Jul 2022 12:56:24 +0100 Subject: Explain less-known term 'Implicit TLS' --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 3462ff5524..da588454c7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,7 +14,7 @@ Features - Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208)) - Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220)) - Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276)) -- Support Implicit TLS for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317)) +- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317)) Bugfixes -- cgit 1.4.1 From 57d334a13d983406ea452dfa203bbe4837509c4e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 26 Jul 2022 08:02:34 -0400 Subject: Remove the unspecced `room_id` field in the `/hierarchy` response. 
(#13365) The `room_id` field represented the parent space for each room and was made redundant by changes in the API shape where the `children_state` is now nested underneath each `room`. The room ID of each child is in the `state_key` field and is still available. --- changelog.d/13365.bugfix | 1 + synapse/handlers/room_summary.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/13365.bugfix diff --git a/changelog.d/13365.bugfix b/changelog.d/13365.bugfix new file mode 100644 index 0000000000..b915c3158c --- /dev/null +++ b/changelog.d/13365.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 13098f56ed..85811b5bde 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -452,7 +452,6 @@ class RoomSummaryHandler: "type": e.type, "state_key": e.state_key, "content": e.content, - "room_id": e.room_id, "sender": e.sender, "origin_server_ts": e.origin_server_ts, } -- cgit 1.4.1 From 543dc9c93e75ac97bd78bdb77d8795487b82abf3 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 26 Jul 2022 17:08:14 +0000 Subject: Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. (#12978) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/12978.misc | 1 + scripts-dev/release.py | 86 +++++++++++++++++++++++++++++++------------------- 2 files changed, 55 insertions(+), 32 deletions(-) create mode 100644 changelog.d/12978.misc diff --git a/changelog.d/12978.misc b/changelog.d/12978.misc new file mode 100644 index 0000000000..050c9047fc --- /dev/null +++ b/changelog.d/12978.misc @@ -0,0 +1 @@ +Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. \ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 0031ba3e4b..5bfd750118 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -55,9 +55,12 @@ def run_until_successful( def cli() -> None: """An interactive script to walk through the parts of creating a release. - Requires the dev dependencies be installed, which can be done via: + Requirements: + - The dev dependencies be installed, which can be done via: - pip install -e .[dev] + pip install -e .[dev] + + - A checkout of the sytest repository at ../sytest Then to use: @@ -89,10 +92,12 @@ def prepare() -> None: """ # Make sure we're in a git repo. - repo = get_repo_and_check_clean_checkout() + synapse_repo = get_repo_and_check_clean_checkout() + sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest") - click.secho("Updating git repo...") - repo.remote().fetch() + click.secho("Updating Synapse and Sytest git repos...") + synapse_repo.remote().fetch() + sytest_repo.remote().fetch() # Get the current version and AST from root Synapse module. current_version = get_package_version() @@ -166,12 +171,12 @@ def prepare() -> None: assert not parsed_new_version.is_postrelease release_branch_name = get_release_branch_name(parsed_new_version) - release_branch = find_ref(repo, release_branch_name) + release_branch = find_ref(synapse_repo, release_branch_name) if release_branch: if release_branch.is_remote(): # If the release branch only exists on the remote we check it out # locally. 
- repo.git.checkout(release_branch_name) + synapse_repo.git.checkout(release_branch_name) else: # If a branch doesn't exist we create one. We ask which one branch it # should be based off, defaulting to sensible values depending on the @@ -187,25 +192,34 @@ def prepare() -> None: "Which branch should the release be based on?", default=default ) - base_branch = find_ref(repo, branch_name) - if not base_branch: - print(f"Could not find base branch {branch_name}!") - click.get_current_context().abort() + for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items(): + base_branch = find_ref(repo, branch_name) + if not base_branch: + print(f"Could not find base branch {branch_name} for {repo_name}!") + click.get_current_context().abort() - # Check out the base branch and ensure it's up to date - repo.head.set_reference(base_branch, "check out the base branch") - repo.head.reset(index=True, working_tree=True) - if not base_branch.is_remote(): - update_branch(repo) + # Check out the base branch and ensure it's up to date + repo.head.set_reference( + base_branch, f"check out the base branch for {repo_name}" + ) + repo.head.reset(index=True, working_tree=True) + if not base_branch.is_remote(): + update_branch(repo) - # Create the new release branch - # Type ignore will no longer be needed after GitPython 3.1.28. - # See https://github.com/gitpython-developers/GitPython/pull/1419 - repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type] + # Create the new release branch + # Type ignore will no longer be needed after GitPython 3.1.28. + # See https://github.com/gitpython-developers/GitPython/pull/1419 + repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type] + + # Special-case SyTest: we don't actually prepare any files so we may + # as well push it now (and only when we create a release branch; + # not on subsequent RCs or full releases). + if click.confirm("Push new SyTest branch?", default=True): + sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name) # Switch to the release branch and ensure it's up to date. - repo.git.checkout(release_branch_name) - update_branch(repo) + synapse_repo.git.checkout(release_branch_name) + update_branch(synapse_repo) # Update the version specified in pyproject.toml. subprocess.check_output(["poetry", "version", new_version]) @@ -230,15 +244,15 @@ def prepare() -> None: run_until_successful('dch -M -r -D stable ""', shell=True) # Show the user the changes and ask if they want to edit the change log. - repo.git.add("-u") + synapse_repo.git.add("-u") subprocess.run("git diff --cached", shell=True) if click.confirm("Edit changelog?", default=False): click.edit(filename="CHANGES.md") # Commit the changes. - repo.git.add("-u") - repo.git.commit("-m", new_version) + synapse_repo.git.add("-u") + synapse_repo.git.commit("-m", new_version) # We give the option to bail here in case the user wants to make sure things # are OK before pushing. @@ -246,17 +260,21 @@ def prepare() -> None: print("") print("Run when ready to push:") print("") - print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}") + print( + f"\tgit push -u {synapse_repo.remote().name} {synapse_repo.active_branch.name}" + ) print("") sys.exit(0) # Otherwise, push and open the changelog in the browser. 
- repo.git.push("-u", repo.remote().name, repo.active_branch.name) + synapse_repo.git.push( + "-u", synapse_repo.remote().name, synapse_repo.active_branch.name + ) print("Opening the changelog in your browser...") print("Please ask others to give it a check.") click.launch( - f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md" + f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md" ) @@ -469,14 +487,18 @@ def get_release_branch_name(version_number: version.Version) -> str: return f"release-v{version_number.major}.{version_number.minor}" -def get_repo_and_check_clean_checkout() -> git.Repo: +def get_repo_and_check_clean_checkout( + path: str = ".", name: str = "synapse" +) -> git.Repo: """Get the project repo and check it's not got any uncommitted changes.""" try: - repo = git.Repo() + repo = git.Repo(path=path) except git.InvalidGitRepositoryError: - raise click.ClickException("Not in Synapse repo.") + raise click.ClickException( + f"{path} is not a git repository (expecting a {name} repository)." + ) if repo.is_dirty(): - raise click.ClickException("Uncommitted changes exist.") + raise click.ClickException(f"Uncommitted changes exist in {path}.") return repo -- cgit 1.4.1 From bf3115584c876392e7849cd4ba2271165572588d Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 26 Jul 2022 18:45:27 +0100 Subject: Copy room serials before handling in `get_new_events_as` (#13392) --- changelog.d/13392.bugfix | 1 + synapse/handlers/typing.py | 13 ++++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13392.bugfix diff --git a/changelog.d/13392.bugfix b/changelog.d/13392.bugfix new file mode 100644 index 0000000000..7d83c77550 --- /dev/null +++ b/changelog.d/13392.bugfix @@ -0,0 +1 @@ +Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index d104ea07fe..27aa0d3126 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -489,8 +489,15 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): handler = self.get_typing_handler() events = [] - for room_id in handler._room_serials.keys(): - if handler._room_serials[room_id] <= from_key: + + # Work on a copy of things here as these may change in the handler while + # waiting for the AS `is_interested_in_room` call to complete. + # Shallow copy is safe as no nested data is present. 
+ latest_room_serial = handler._latest_room_serial + room_serials = handler._room_serials.copy() + + for room_id, serial in room_serials.items(): + if serial <= from_key: continue if not await service.is_interested_in_room(room_id, self._main_store): @@ -498,7 +505,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): events.append(self._make_event_for(room_id)) - return events, handler._latest_room_serial + return events, latest_room_serial async def get_new_events( self, -- cgit 1.4.1 From 4f3082d6bf85335d10775a2e869420592189c6b2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 27 Jul 2022 04:40:04 -0500 Subject: Fix `get_pdu` asking every remote destination even after it finds an event (#13346) --- changelog.d/13346.misc | 1 + synapse/federation/federation_client.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13346.misc diff --git a/changelog.d/13346.misc b/changelog.d/13346.misc new file mode 100644 index 0000000000..06557c8481 --- /dev/null +++ b/changelog.d/13346.misc @@ -0,0 +1 @@ +Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 842f5327c2..02276ed995 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -403,9 +403,9 @@ class FederationClient(FederationBase): # Prime the cache self._get_pdu_cache[event.event_id] = event - # FIXME: We should add a `break` here to avoid calling every - # destination after we already found a PDU (will follow-up - # in a separate PR) + # Now that we have an event, we can break out of this + # loop and stop asking other destinations. + break except SynapseError as e: logger.info( -- cgit 1.4.1 From 39be5bc550f2e882b4754c7d98c906d9bde8b649 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 27 Jul 2022 10:37:50 +0000 Subject: Make minor clarifications to the error messages given when we fail to join a room via any server. (#13160) --- changelog.d/13160.misc | 1 + synapse/federation/federation_client.py | 8 +++++++- synapse/handlers/room_member.py | 6 +++++- tests/rest/admin/test_room.py | 5 ++++- 4 files changed, 17 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13160.misc diff --git a/changelog.d/13160.misc b/changelog.d/13160.misc new file mode 100644 index 0000000000..36ff50c2a6 --- /dev/null +++ b/changelog.d/13160.misc @@ -0,0 +1 @@ +Make minor clarifications to the error messages given when we fail to join a room via any server. \ No newline at end of file diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 02276ed995..6a8d76529b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -725,6 +725,12 @@ class FederationClient(FederationBase): if failover_errcodes is None: failover_errcodes = () + if not destinations: + # Give a bit of a clearer message if no servers were specified at all. + raise SynapseError( + 502, f"Failed to {description} via any server: No servers specified." 
+ ) + for destination in destinations: if destination == self.server_name: continue @@ -774,7 +780,7 @@ class FederationClient(FederationBase): "Failed to %s via %s", description, destination, exc_info=True ) - raise SynapseError(502, "Failed to %s via any server" % (description,)) + raise SynapseError(502, f"Failed to {description} via any server") async def make_membership_event( self, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 30b4cb23df..520c52e013 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1679,7 +1679,11 @@ class RoomMemberMasterHandler(RoomMemberHandler): ] if len(remote_room_hosts) == 0: - raise SynapseError(404, "No known servers") + raise SynapseError( + 404, + "Can't join remote room because no servers " + "that are in the room have been provided.", + ) check_complexity = self.hs.config.server.limit_remote_rooms.enabled if ( diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 2526136ff8..623883b53c 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1873,7 +1873,10 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): ) self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) - self.assertEqual("No known servers", channel.json_body["error"]) + self.assertEqual( + "Can't join remote room because no servers that are in the room have been provided.", + channel.json_body["error"], + ) def test_room_is_not_valid(self) -> None: """ -- cgit 1.4.1 From 502f075e96b458a183952ae2be402f00b28af299 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 27 Jul 2022 13:44:40 +0100 Subject: Implement MSC3848: Introduce errcodes for specific event sending failures (#13343) Implements MSC3848 --- changelog.d/13343.feature | 1 + synapse/api/auth.py | 11 +++-- synapse/api/errors.py | 58 ++++++++++++++++++++++----- synapse/config/experimental.py | 3 ++ synapse/event_auth.py | 62 ++++++++++++++++++++++++----- synapse/federation/federation_server.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/message.py | 13 +++++- synapse/handlers/room_summary.py | 5 ++- synapse/http/server.py | 18 +++++++-- tests/rest/client/test_third_party_rules.py | 5 ++- 11 files changed, 144 insertions(+), 36 deletions(-) create mode 100644 changelog.d/13343.feature diff --git a/changelog.d/13343.feature b/changelog.d/13343.feature new file mode 100644 index 0000000000..c151251e54 --- /dev/null +++ b/changelog.d/13343.feature @@ -0,0 +1 @@ +Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in MSC3848. 
\ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 6e6eaf3805..82e6475ef5 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( Codes, InvalidClientTokenError, MissingClientTokenError, + UnstableSpecAuthError, ) from synapse.appservice import ApplicationService from synapse.http import get_request_user_agent @@ -106,8 +107,11 @@ class Auth: forgot = await self.store.did_forget(user_id, room_id) if not forgot: return membership, member_event_id - - raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) + raise UnstableSpecAuthError( + 403, + "User %s not in room %s" % (user_id, room_id), + errcode=Codes.NOT_JOINED, + ) async def get_user_by_req( self, @@ -600,8 +604,9 @@ class Auth: == HistoryVisibility.WORLD_READABLE ): return Membership.JOIN, None - raise AuthError( + raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" % (user_id, room_id), + errcode=Codes.NOT_JOINED, ) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 1c74e131f2..e6dea89c6d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -26,6 +26,7 @@ from twisted.web import http from synapse.util import json_decoder if typing.TYPE_CHECKING: + from synapse.config.homeserver import HomeServerConfig from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -80,6 +81,12 @@ class Codes(str, Enum): INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" + # Part of MSC3848 + # https://github.com/matrix-org/matrix-spec-proposals/pull/3848 + ALREADY_JOINED = "ORG.MATRIX.MSC3848.ALREADY_JOINED" + NOT_JOINED = "ORG.MATRIX.MSC3848.NOT_JOINED" + INSUFFICIENT_POWER = "ORG.MATRIX.MSC3848.INSUFFICIENT_POWER" + # The account has been suspended on the server. # By opposition to `USER_DEACTIVATED`, this is a reversible measure # that can possibly be appealed and reverted. @@ -167,7 +174,7 @@ class SynapseError(CodeMessageException): else: self._additional_fields = dict(additional_fields) - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, **self._additional_fields) @@ -213,7 +220,7 @@ class ConsentNotGivenError(SynapseError): ) self._consent_uri = consent_uri - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri) @@ -307,6 +314,37 @@ class AuthError(SynapseError): super().__init__(code, msg, errcode, additional_fields) +class UnstableSpecAuthError(AuthError): + """An error raised when a new error code is being proposed to replace a previous one. + This error will return a "org.matrix.unstable.errcode" property with the new error code, + with the previous error code still being defined in the "errcode" property. + + This error will include `org.matrix.msc3848.unstable.errcode` in the C-S error body. 
+ """ + + def __init__( + self, + code: int, + msg: str, + errcode: str, + previous_errcode: str = Codes.FORBIDDEN, + additional_fields: Optional[dict] = None, + ): + self.previous_errcode = previous_errcode + super().__init__(code, msg, errcode, additional_fields) + + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": + fields = {} + if config is not None and config.experimental.msc3848_enabled: + fields["org.matrix.msc3848.unstable.errcode"] = self.errcode + return cs_error( + self.msg, + self.previous_errcode, + **fields, + **self._additional_fields, + ) + + class InvalidClientCredentialsError(SynapseError): """An error raised when there was a problem with the authorisation credentials in a client request. @@ -338,8 +376,8 @@ class InvalidClientTokenError(InvalidClientCredentialsError): super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN") self._soft_logout = soft_logout - def error_dict(self) -> "JsonDict": - d = super().error_dict() + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": + d = super().error_dict(config) d["soft_logout"] = self._soft_logout return d @@ -362,7 +400,7 @@ class ResourceLimitError(SynapseError): self.limit_type = limit_type super().__init__(code, msg, errcode=errcode) - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error( self.msg, self.errcode, @@ -397,7 +435,7 @@ class InvalidCaptchaError(SynapseError): super().__init__(code, msg, errcode) self.error_url = error_url - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, error_url=self.error_url) @@ -414,7 +452,7 @@ class LimitExceededError(SynapseError): super().__init__(code, msg, errcode) self.retry_after_ms = retry_after_ms - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms) @@ -429,7 +467,7 @@ class RoomKeysVersionError(SynapseError): super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION) self.current_version = current_version - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, current_version=self.current_version) @@ -469,7 +507,7 @@ class IncompatibleRoomVersionError(SynapseError): self._room_version = room_version - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, room_version=self._room_version) @@ -515,7 +553,7 @@ class UnredactedContentDeletedError(SynapseError): ) self.content_keep_ms = content_keep_ms - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": extra = {} if self.content_keep_ms is not None: extra = {"fi.mau.msc2815.content_keep_ms": self.content_keep_ms} diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index ee443cea00..1902222d7b 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -90,3 +90,6 @@ class ExperimentalConfig(Config): # MSC3827: Filtering of /publicRooms by room type self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False) + + # MSC3848: Introduce errcodes for specific event sending failures + self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False) 
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 965cb265da..389b0c5d53 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -30,7 +30,13 @@ from synapse.api.constants import ( JoinRules, Membership, ) -from synapse.api.errors import AuthError, EventSizeError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + EventSizeError, + SynapseError, + UnstableSpecAuthError, +) from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, @@ -291,7 +297,11 @@ def check_state_dependent_auth_rules( invite_level = get_named_level(auth_dict, "invite", 0) if user_level < invite_level: - raise AuthError(403, "You don't have permission to invite users") + raise UnstableSpecAuthError( + 403, + "You don't have permission to invite users", + errcode=Codes.INSUFFICIENT_POWER, + ) else: logger.debug("Allowing! %s", event) return @@ -474,7 +484,11 @@ def _is_membership_change_allowed( return if not caller_in_room: # caller isn't joined - raise AuthError(403, "%s not in room %s." % (event.user_id, event.room_id)) + raise UnstableSpecAuthError( + 403, + "%s not in room %s." % (event.user_id, event.room_id), + errcode=Codes.NOT_JOINED, + ) if Membership.INVITE == membership: # TODO (erikj): We should probably handle this more intelligently @@ -484,10 +498,18 @@ def _is_membership_change_allowed( if target_banned: raise AuthError(403, "%s is banned from the room" % (target_user_id,)) elif target_in_room: # the target is already in the room. - raise AuthError(403, "%s is already in the room." % target_user_id) + raise UnstableSpecAuthError( + 403, + "%s is already in the room." % target_user_id, + errcode=Codes.ALREADY_JOINED, + ) else: if user_level < invite_level: - raise AuthError(403, "You don't have permission to invite users") + raise UnstableSpecAuthError( + 403, + "You don't have permission to invite users", + errcode=Codes.INSUFFICIENT_POWER, + ) elif Membership.JOIN == membership: # Joins are valid iff caller == target and: # * They are not banned. @@ -549,15 +571,27 @@ def _is_membership_change_allowed( elif Membership.LEAVE == membership: # TODO (erikj): Implement kicks. if target_banned and user_level < ban_level: - raise AuthError(403, "You cannot unban user %s." % (target_user_id,)) + raise UnstableSpecAuthError( + 403, + "You cannot unban user %s." % (target_user_id,), + errcode=Codes.INSUFFICIENT_POWER, + ) elif target_user_id != event.user_id: kick_level = get_named_level(auth_events, "kick", 50) if user_level < kick_level or user_level <= target_level: - raise AuthError(403, "You cannot kick user %s." % target_user_id) + raise UnstableSpecAuthError( + 403, + "You cannot kick user %s." 
% target_user_id, + errcode=Codes.INSUFFICIENT_POWER, + ) elif Membership.BAN == membership: if user_level < ban_level or user_level <= target_level: - raise AuthError(403, "You don't have permission to ban") + raise UnstableSpecAuthError( + 403, + "You don't have permission to ban", + errcode=Codes.INSUFFICIENT_POWER, + ) elif room_version.msc2403_knocking and Membership.KNOCK == membership: if join_rule != JoinRules.KNOCK and ( not room_version.msc3787_knock_restricted_join_rule @@ -567,7 +601,11 @@ elif target_user_id != event.user_id: raise AuthError(403, "You cannot knock for other users") elif target_in_room: - raise AuthError(403, "You cannot knock on a room you are already in") + raise UnstableSpecAuthError( + 403, + "You cannot knock on a room you are already in", + errcode=Codes.ALREADY_JOINED, + ) elif caller_invited: raise AuthError(403, "You are already invited to this room") elif target_banned: @@ -638,10 +676,11 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b user_level = get_user_power_level(event.user_id, auth_events) if user_level < send_level: - raise AuthError( + raise UnstableSpecAuthError( 403, "You don't have permission to post that to the room. " + "user_level (%d) < send_level (%d)" % (user_level, send_level), + errcode=Codes.INSUFFICIENT_POWER, ) # Check state_key @@ -716,9 +755,10 @@ def check_historical( historical_level = get_named_level(auth_events, "historical", 100) if user_level < historical_level: - raise AuthError( + raise UnstableSpecAuthError( 403, 'You don\'t have permission to send historical related events ("insertion", "batch", and "marker")', + errcode=Codes.INSUFFICIENT_POWER, ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ae550d3f4d..1d60137411 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -469,7 +469,7 @@ class FederationServer(FederationBase): ) for pdu in pdus_by_room[room_id]: event_id = pdu.event_id - pdu_results[event_id] = e.error_dict() + pdu_results[event_id] = e.error_dict(self.hs.config) return for pdu in pdus_by_room[room_id]: diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 3d83236b0c..bfa5535044 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -565,7 +565,7 @@ class AuthHandler: except LoginError as e: # this step failed. Merge the error dict into the response # so that the client can have another go. - errordict = e.error_dict() + errordict = e.error_dict(self.hs.config) creds = await self.store.get_completed_ui_auth_stages(session.session_id) for f in flows: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e0bcc40b93..e85b540451 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -41,6 +41,7 @@ from synapse.api.errors import ( NotFoundError, ShadowBanError, SynapseError, + UnstableSpecAuthError, UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -149,7 +150,11 @@ class MessageHandler: "Attempted to retrieve data from a room for a user that has never been in it. " "This should not have happened. "
) - raise SynapseError(403, "User not in room", errcode=Codes.FORBIDDEN) + raise UnstableSpecAuthError( + 403, + "User not in room", + errcode=Codes.NOT_JOINED, + ) return data @@ -334,7 +339,11 @@ class MessageHandler: break else: # Loop fell through, AS has no interested users in room - raise AuthError(403, "Appservice not in room") + raise UnstableSpecAuthError( + 403, + "Appservice not in room", + errcode=Codes.NOT_JOINED, + ) return { user_id: { diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 85811b5bde..ebd445adca 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -28,11 +28,11 @@ from synapse.api.constants import ( RoomTypes, ) from synapse.api.errors import ( - AuthError, Codes, NotFoundError, StoreError, SynapseError, + UnstableSpecAuthError, UnsupportedRoomVersionError, ) from synapse.api.ratelimiting import Ratelimiter @@ -175,10 +175,11 @@ class RoomSummaryHandler: # First of all, check that the room is accessible. if not await self._is_local_room_accessible(requested_room_id, requester): - raise AuthError( + raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" % (requester, requested_room_id), + errcode=Codes.NOT_JOINED, ) # If this is continuing a previous session, pull the persisted data. diff --git a/synapse/http/server.py b/synapse/http/server.py index cf2d6f904b..19f42159b8 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -58,6 +58,7 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) +from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet @@ -155,15 +156,16 @@ def is_method_cancellable(method: Callable[..., Any]) -> bool: return getattr(method, "cancellable", False) -def return_json_error(f: failure.Failure, request: SynapseRequest) -> None: +def return_json_error( + f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig] +) -> None: """Sends a JSON error response to clients.""" if f.check(SynapseError): # mypy doesn't understand that f.check asserts the type. 
exc: SynapseError = f.value # type: ignore error_code = exc.code - error_dict = exc.error_dict() - + error_dict = exc.error_dict(config) logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) elif f.check(CancelledError): error_code = HTTP_STATUS_REQUEST_CANCELLED @@ -450,7 +452,7 @@ class DirectServeJsonResource(_AsyncResource): request: SynapseRequest, ) -> None: """Implements _AsyncResource._send_error_response""" - return_json_error(f, request) + return_json_error(f, request, None) @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -575,6 +577,14 @@ class JsonResource(DirectServeJsonResource): return callback_return + def _send_error_response( + self, + f: failure.Failure, + request: SynapseRequest, + ) -> None: + """Implements _AsyncResource._send_error_response""" + return_json_error(f, request, self.hs.config) + class DirectServeHtmlResource(_AsyncResource): """A resource that will call `self._async_on_` on new requests, diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 9a48e9286f..18a7195409 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -20,6 +20,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, LoginType, Membership from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion +from synapse.config.homeserver import HomeServerConfig from synapse.events import EventBase from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin @@ -185,12 +186,12 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ class NastyHackException(SynapseError): - def error_dict(self) -> JsonDict: + def error_dict(self, config: Optional[HomeServerConfig]) -> JsonDict: """ This overrides SynapseError's `error_dict` to nastily inject JSON into the error response. """ - result = super().error_dict() + result = super().error_dict(config) result["nasty"] = "very" return result -- cgit 1.4.1 From 922b771337f6d14a556fa761c783748f698e924b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 27 Jul 2022 13:18:41 -0400 Subject: Add missing type hints for tests.unittest. (#13397) --- changelog.d/13397.misc | 1 + tests/handlers/test_directory.py | 12 +----- tests/rest/client/test_relations.py | 6 ++- tests/rest/client/test_rooms.py | 2 +- tests/server.py | 11 ++++- tests/unittest.py | 86 +++++++++++++++++++++---------------- 6 files changed, 66 insertions(+), 52 deletions(-) create mode 100644 changelog.d/13397.misc diff --git a/changelog.d/13397.misc b/changelog.d/13397.misc new file mode 100644 index 0000000000..8dc610d9e2 --- /dev/null +++ b/changelog.d/13397.misc @@ -0,0 +1 @@ +Adding missing type hints to tests. 
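Aside, for readers following the typing changes in this commit: the most intricate piece is the `ParamSpec`/`Concatenate` annotation given to the CLOS-style `around` helper in `tests/unittest.py`. Below is a self-contained sketch of that pattern; the `Greeter` class and `greet` wrapper are hypothetical illustrations for the usage demo, not Synapse code.

```python
from typing import Any, Callable, TypeVar

from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")
S = TypeVar("S")


def around(target: Any) -> Callable[[Callable[Concatenate[S, P], R]], None]:
    """Replace target.<name> with a wrapper that receives the original
    method as its first argument (the pattern this commit adds types for)."""

    def _around(code: Callable[Concatenate[S, P], R]) -> None:
        name = code.__name__
        orig = getattr(target, name)

        def new(*args: P.args, **kwargs: P.kwargs) -> R:
            return code(orig, *args, **kwargs)

        setattr(target, name, new)

    return _around


class Greeter:
    def greet(self) -> str:
        return "hello"


g = Greeter()


@around(g)
def greet(orig: Callable[[], str]) -> str:
    # `orig` is the original bound method; decorate its result.
    return orig().upper()


print(g.greet())  # prints "HELLO"
```

`Concatenate[S, P]` expresses that the decorated function takes the original callable first, followed by whatever parameters the wrapped method itself accepts, so call sites keep full type checking.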
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 53d49ca896..3b72c4c9d0 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -481,17 +481,13 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): return config - def prepare( - self, reactor: MemoryReactor, clock: Clock, hs: HomeServer - ) -> HomeServer: + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.allowed_user_id = self.register_user(self.allowed_localpart, "pass") self.allowed_access_token = self.login(self.allowed_localpart, "pass") self.denied_user_id = self.register_user("denied", "pass") self.denied_access_token = self.login("denied", "pass") - return hs - def test_denied_without_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, @@ -575,9 +571,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def prepare( - self, reactor: MemoryReactor, clock: Clock, hs: HomeServer - ) -> HomeServer: + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -588,8 +582,6 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): self.room_list_handler = hs.get_room_list_handler() self.directory_handler = hs.get_directory_handler() - return hs - def test_disabling_room_list(self) -> None: self.room_list_handler.enable_room_list_search = True self.directory_handler.enable_room_list_search = True diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index ad03eee17b..d589f07314 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1060,6 +1060,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): participated, bundled_aggregations.get("current_user_participated") ) # The latest thread event has some fields that don't matter. + self.assertIn("latest_event", bundled_aggregations) self.assert_dict( { "content": { @@ -1072,7 +1073,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): "sender": self.user2_id, "type": "m.room.test", }, - bundled_aggregations.get("latest_event"), + bundled_aggregations["latest_event"], ) return assert_thread @@ -1112,6 +1113,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): self.assertEqual(2, bundled_aggregations.get("count")) self.assertTrue(bundled_aggregations.get("current_user_participated")) # The latest thread event has some fields that don't matter. + self.assertIn("latest_event", bundled_aggregations) self.assert_dict( { "content": { @@ -1124,7 +1126,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): "sender": self.user_id, "type": "m.room.test", }, - bundled_aggregations.get("latest_event"), + bundled_aggregations["latest_event"], ) # Check the unsigned field on the latest event. 
self.assert_dict( diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index c45cb32090..2272d55d84 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -496,7 +496,7 @@ class RoomStateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertCountEqual( - [state_event["type"] for state_event in channel.json_body], + [state_event["type"] for state_event in channel.json_list], { "m.room.create", "m.room.power_levels", diff --git a/tests/server.py b/tests/server.py index df3f1564c9..9689e6a0cd 100644 --- a/tests/server.py +++ b/tests/server.py @@ -25,6 +25,7 @@ from typing import ( Callable, Dict, Iterable, + List, MutableMapping, Optional, Tuple, @@ -121,7 +122,15 @@ class FakeChannel: @property def json_body(self) -> JsonDict: - return json.loads(self.text_body) + body = json.loads(self.text_body) + assert isinstance(body, dict) + return body + + @property + def json_list(self) -> List[JsonDict]: + body = json.loads(self.text_body) + assert isinstance(body, list) + return body @property def text_body(self) -> str: diff --git a/tests/unittest.py b/tests/unittest.py index 66ce92f4a6..bec4a3d023 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -28,6 +28,7 @@ from typing import ( Generic, Iterable, List, + NoReturn, Optional, Tuple, Type, @@ -39,7 +40,7 @@ from unittest.mock import Mock, patch import canonicaljson import signedjson.key import unpaddedbase64 -from typing_extensions import Protocol +from typing_extensions import Concatenate, ParamSpec, Protocol from twisted.internet.defer import Deferred, ensureDeferred from twisted.python.failure import Failure @@ -67,7 +68,7 @@ from synapse.logging.context import ( from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer from synapse.storage.keys import FetchKeyResult -from synapse.types import JsonDict, UserID, create_requester +from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree @@ -88,6 +89,10 @@ setup_logging() TV = TypeVar("TV") _ExcType = TypeVar("_ExcType", bound=BaseException, covariant=True) +P = ParamSpec("P") +R = TypeVar("R") +S = TypeVar("S") + class _TypedFailure(Generic[_ExcType], Protocol): """Extension to twisted.Failure, where the 'value' has a certain type.""" @@ -97,7 +102,7 @@ class _TypedFailure(Generic[_ExcType], Protocol): ... -def around(target): +def around(target: TV) -> Callable[[Callable[Concatenate[S, P], R]], None]: """A CLOS-style 'around' modifier, which wraps the original method of the given instance with another piece of code. @@ -106,11 +111,11 @@ def around(target): return orig(*args, **kwargs) """ - def _around(code): + def _around(code: Callable[Concatenate[S, P], R]) -> None: name = code.__name__ orig = getattr(target, name) - def new(*args, **kwargs): + def new(*args: P.args, **kwargs: P.kwargs) -> R: return code(orig, *args, **kwargs) setattr(target, name, new) @@ -131,7 +136,7 @@ class TestCase(unittest.TestCase): level = getattr(method, "loglevel", getattr(self, "loglevel", None)) @around(self) - def setUp(orig): + def setUp(orig: Callable[[], R]) -> R: # if we're not starting in the sentinel logcontext, then to be honest # all future bets are off. 
if current_context(): @@ -144,7 +149,7 @@ class TestCase(unittest.TestCase): if level is not None and old_level != level: @around(self) - def tearDown(orig): + def tearDown(orig: Callable[[], R]) -> R: ret = orig() logging.getLogger().setLevel(old_level) return ret @@ -158,7 +163,7 @@ class TestCase(unittest.TestCase): return orig() @around(self) - def tearDown(orig): + def tearDown(orig: Callable[[], R]) -> R: ret = orig() # force a GC to workaround problems with deferreds leaking logcontexts when # they are GCed (see the logcontext docs) @@ -167,7 +172,7 @@ class TestCase(unittest.TestCase): return ret - def assertObjectHasAttributes(self, attrs, obj): + def assertObjectHasAttributes(self, attrs: Dict[str, object], obj: object) -> None: """Asserts that the given object has each of the attributes given, and that the value of each matches according to assertEqual.""" for key in attrs.keys(): @@ -178,12 +183,12 @@ class TestCase(unittest.TestCase): except AssertionError as e: raise (type(e))(f"Assert error for '.{key}':") from e - def assert_dict(self, required, actual): + def assert_dict(self, required: dict, actual: dict) -> None: """Does a partial assert of a dict. Args: - required (dict): The keys and value which MUST be in 'actual'. - actual (dict): The test result. Extra keys will not be checked. + required: The keys and value which MUST be in 'actual'. + actual: The test result. Extra keys will not be checked. """ for key in required: self.assertEqual( @@ -191,31 +196,31 @@ class TestCase(unittest.TestCase): ) -def DEBUG(target): +def DEBUG(target: TV) -> TV: """A decorator to set the .loglevel attribute to logging.DEBUG. Can apply to either a TestCase or an individual test method.""" - target.loglevel = logging.DEBUG + target.loglevel = logging.DEBUG # type: ignore[attr-defined] return target -def INFO(target): +def INFO(target: TV) -> TV: """A decorator to set the .loglevel attribute to logging.INFO. Can apply to either a TestCase or an individual test method.""" - target.loglevel = logging.INFO + target.loglevel = logging.INFO # type: ignore[attr-defined] return target -def logcontext_clean(target): +def logcontext_clean(target: TV) -> TV: """A decorator which marks the TestCase or method as 'logcontext_clean' ... 
ie, any logcontext errors should cause a test failure """ - def logcontext_error(msg): + def logcontext_error(msg: str) -> NoReturn: raise AssertionError("logcontext error: %s" % (msg)) patcher = patch("synapse.logging.context.logcontext_error", new=logcontext_error) - return patcher(target) + return patcher(target) # type: ignore[call-overload] class HomeserverTestCase(TestCase): @@ -255,7 +260,7 @@ class HomeserverTestCase(TestCase): method = getattr(self, methodName) self._extra_config = getattr(method, "_extra_config", None) - def setUp(self): + def setUp(self) -> None: """ Set up the TestCase by calling the homeserver constructor, optionally hijacking the authentication system to return a fixed user, and then @@ -306,7 +311,9 @@ class HomeserverTestCase(TestCase): ) ) - async def get_user_by_access_token(token=None, allow_guest=False): + async def get_user_by_access_token( + token: Optional[str] = None, allow_guest: bool = False + ) -> JsonDict: assert self.helper.auth_user_id is not None return { "user": UserID.from_string(self.helper.auth_user_id), @@ -314,7 +321,11 @@ class HomeserverTestCase(TestCase): "is_guest": False, } - async def get_user_by_req(request, allow_guest=False): + async def get_user_by_req( + request: SynapseRequest, + allow_guest: bool = False, + allow_expired: bool = False, + ) -> Requester: assert self.helper.auth_user_id is not None return create_requester( UserID.from_string(self.helper.auth_user_id), @@ -339,11 +350,11 @@ class HomeserverTestCase(TestCase): if hasattr(self, "prepare"): self.prepare(self.reactor, self.clock, self.hs) - def tearDown(self): + def tearDown(self) -> None: # Reset to not use frozen dicts. events.USE_FROZEN_DICTS = False - def wait_on_thread(self, deferred, timeout=10): + def wait_on_thread(self, deferred: Deferred, timeout: int = 10) -> None: """ Wait until a Deferred is done, where it's waiting on a real thread. """ @@ -374,7 +385,7 @@ class HomeserverTestCase(TestCase): clock (synapse.util.Clock): The Clock, associated with the reactor. Returns: - A homeserver (synapse.server.HomeServer) suitable for testing. + A homeserver suitable for testing. Function to be overridden in subclasses. """ @@ -408,7 +419,7 @@ class HomeserverTestCase(TestCase): "/_synapse/admin": servlet_resource, } - def default_config(self): + def default_config(self) -> JsonDict: """ Get a default HomeServer config dict. """ @@ -421,7 +432,9 @@ class HomeserverTestCase(TestCase): return config - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: """ Prepare for the test. 
This involves things like mocking out parts of the homeserver, or building test data common across the whole test @@ -519,7 +532,7 @@ class HomeserverTestCase(TestCase): config_obj.parse_config_dict(config, "", "") kwargs["config"] = config_obj - async def run_bg_updates(): + async def run_bg_updates() -> None: with LoggingContext("run_bg_updates"): self.get_success(stor.db_pool.updates.run_background_updates(False)) @@ -538,11 +551,7 @@ class HomeserverTestCase(TestCase): """ self.reactor.pump([by] * 100) - def get_success( - self, - d: Awaitable[TV], - by: float = 0.0, - ) -> TV: + def get_success(self, d: Awaitable[TV], by: float = 0.0) -> TV: deferred: Deferred[TV] = ensureDeferred(d) # type: ignore[arg-type] self.pump(by=by) return self.successResultOf(deferred) @@ -755,7 +764,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase): OTHER_SERVER_NAME = "other.example.com" OTHER_SERVER_SIGNATURE_KEY = signedjson.key.generate_signing_key("test") - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) # poke the other server's signing key into the key store, so that we don't @@ -879,7 +888,7 @@ def _auth_header_for_request( ) -def override_config(extra_config): +def override_config(extra_config: JsonDict) -> Callable[[TV], TV]: """A decorator which can be applied to test functions to give additional HS config For use @@ -892,12 +901,13 @@ def override_config(extra_config): ... Args: - extra_config(dict): Additional config settings to be merged into the default + extra_config: Additional config settings to be merged into the default config dict before instantiating the test homeserver. """ - def decorator(func): - func._extra_config = extra_config + def decorator(func: TV) -> TV: + # This attribute is being defined. + func._extra_config = extra_config # type: ignore[attr-defined] return func return decorator -- cgit 1.4.1 From 583f22780f44157c50bc2dc5c242e88cc18c7886 Mon Sep 17 00:00:00 2001 From: Šimon Brandner Date: Wed, 27 Jul 2022 20:46:57 +0200 Subject: Use stable prefixes for MSC3827: filtering of `/publicRooms` by room type (#13370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Šimon Brandner --- changelog.d/13370.feature | 1 + synapse/api/constants.py | 2 +- synapse/config/experimental.py | 3 --- synapse/handlers/room_list.py | 2 +- synapse/rest/client/versions.py | 4 ++-- synapse/storage/databases/main/room.py | 2 +- tests/rest/client/test_rooms.py | 5 ++--- 7 files changed, 8 insertions(+), 11 deletions(-) create mode 100644 changelog.d/13370.feature diff --git a/changelog.d/13370.feature b/changelog.d/13370.feature new file mode 100644 index 0000000000..3a49bc2778 --- /dev/null +++ b/changelog.d/13370.feature @@ -0,0 +1 @@ +Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). 
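For context on the changes below: with the stable prefix, clients filter `/publicRooms` by room type using the spec's `room_types` field rather than `org.matrix.msc3827.room_types`. A rough sketch of such a request follows, assuming a placeholder homeserver URL and a server that allows unauthenticated room-list access (as the updated test below configures via `allow_public_rooms_without_auth`); otherwise an `Authorization: Bearer <token>` header would be required.

```python
import json
import urllib.request

body = {
    "limit": 10,
    "filter": {
        # Stable field name after this change; previously
        # "org.matrix.msc3827.room_types". A null entry matches rooms
        # that have no room type set at all.
        "room_types": ["m.space", None],
    },
}

req = urllib.request.Request(
    "https://matrix.example.com/_matrix/client/v3/publicRooms",
    data=json.dumps(body).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(req) as resp:
    for room in json.load(resp)["chunk"]:
        # Each entry now carries "room_type" instead of
        # "org.matrix.msc3827.room_type", per the handler change below.
        print(room.get("room_type"), room.get("name"))
```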
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 2653764119..789859e69e 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -268,4 +268,4 @@ class PublicRoomsFilterFields: """ GENERIC_SEARCH_TERM: Final = "generic_search_term" - ROOM_TYPES: Final = "org.matrix.msc3827.room_types" + ROOM_TYPES: Final = "room_types" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1902222d7b..c2ecd977cd 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -88,8 +88,5 @@ class ExperimentalConfig(Config): # MSC3715: dir param on /relations. self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False) - # MSC3827: Filtering of /publicRooms by room type - self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False) - # MSC3848: Introduce errcodes for specific event sending failures self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 29868eb743..bb0bdb8e6f 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -182,7 +182,7 @@ class RoomListHandler: == HistoryVisibility.WORLD_READABLE, "guest_can_join": room["guest_access"] == "can_join", "join_rule": room["join_rules"], - "org.matrix.msc3827.room_type": room["room_type"], + "room_type": room["room_type"], } # Filter out Nones – rather omit the field altogether diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index f4f06563dd..0366986755 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -95,8 +95,8 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, # Supports receiving private read receipts as per MSC2285 "org.matrix.msc2285": self.config.experimental.msc2285_enabled, - # Supports filtering of /publicRooms by room type MSC3827 - "org.matrix.msc3827": self.config.experimental.msc3827_enabled, + # Supports filtering of /publicRooms by room type as per MSC3827 + "org.matrix.msc3827.stable": True, # Adds support for importing historical messages as per MSC2716 "org.matrix.msc2716": self.config.experimental.msc2716_enabled, # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index d6d485507b..0f1f0d11ea 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -207,7 +207,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _construct_room_type_where_clause( self, room_types: Union[List[Union[str, None]], None] ) -> Tuple[Union[str, None], List[str]]: - if not room_types or not self.config.experimental.msc3827_enabled: + if not room_types: return None, [] else: # We use None when we want get rooms without a type diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 2272d55d84..aa2f578441 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -2070,7 +2070,6 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): config = self.default_config() config["allow_public_rooms_without_auth"] = True - config["experimental_features"] = {"msc3827_enabled": True} self.hs = self.setup_test_homeserver(config=config) self.url = b"/_matrix/client/r0/publicRooms" @@ -2123,13 +2122,13 @@ class 
PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): chunk, count = self.make_public_rooms_request([None]) self.assertEqual(count, 1) - self.assertEqual(chunk[0].get("org.matrix.msc3827.room_type", None), None) + self.assertEqual(chunk[0].get("room_type", None), None) def test_returns_only_space_based_on_filter(self) -> None: chunk, count = self.make_public_rooms_request(["m.space"]) self.assertEqual(count, 1) - self.assertEqual(chunk[0].get("org.matrix.msc3827.room_type", None), "m.space") + self.assertEqual(chunk[0].get("room_type", None), "m.space") def test_returns_both_rooms_and_space_based_on_filter(self) -> None: chunk, count = self.make_public_rooms_request(["m.space", None]) -- cgit 1.4.1 From 24ef1460f691c0fb40470784603c6b0c34629d09 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 29 Jul 2022 11:09:57 +0200 Subject: Explicitly mention which resources support compression in the config guide (#13221) --- changelog.d/13221.doc | 1 + docs/usage/configuration/config_documentation.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13221.doc diff --git a/changelog.d/13221.doc b/changelog.d/13221.doc new file mode 100644 index 0000000000..dd2b3d8972 --- /dev/null +++ b/changelog.d/13221.doc @@ -0,0 +1 @@ +Document which HTTP resources support gzip compression. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index eefcc7829d..5558b0d2ec 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -486,7 +486,8 @@ Sub-options for each listener include: * `names`: a list of names of HTTP resources. See below for a list of valid resource names. - * `compress`: set to true to enable HTTP compression for this resource. + * `compress`: set to true to enable gzip compression on HTTP bodies for this resource. This is currently only supported with the + `client`, `consent` and `metrics` resources. * `additional_resources`: Only valid for an 'http' listener. A map of additional endpoints which should be loaded via dynamic modules. -- cgit 1.4.1 From 98fb610cc043e4f6ba77f78aaecef6b646bf61d6 Mon Sep 17 00:00:00 2001 From: 3nprob <74199244+3nprob@users.noreply.github.com> Date: Fri, 29 Jul 2022 10:29:23 +0000 Subject: Revert "Drop support for delegating email validation (#13192)" (#13406) Reverts commit fa71bb18b527d1a3e2629b48640ea67fff2f8c59, and tweaks documentation. Signed-off-by: 3nprob --- changelog.d/13406.misc | 1 + docs/upgrade.md | 13 - docs/usage/configuration/config_documentation.md | 362 +++++++++++------------ synapse/app/homeserver.py | 3 +- synapse/config/emailconfig.py | 46 ++- synapse/config/registration.py | 14 +- synapse/handlers/identity.py | 56 +++- synapse/handlers/ui_auth/checkers.py | 21 +- synapse/rest/client/account.py | 106 ++++--- synapse/rest/client/register.py | 59 ++-- synapse/rest/synapse/client/password_reset.py | 8 +- tests/rest/client/test_register.py | 2 +- 12 files changed, 425 insertions(+), 266 deletions(-) create mode 100644 changelog.d/13406.misc diff --git a/changelog.d/13406.misc b/changelog.d/13406.misc new file mode 100644 index 0000000000..f78e052e87 --- /dev/null +++ b/changelog.d/13406.misc @@ -0,0 +1 @@ +Warn instead of error when using `account_threepid_delegates.email`, which was deprecated in 1.64.0rc1. 
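A minimal sketch (not the actual `synapse/config/emailconfig.py` change) of the behaviour this revert restores: Synapse logs a warning, rather than refusing to start, when the deprecated `account_threepid_delegates.email` key is present. The function name and message text here are illustrative only.

```python
import logging

logger = logging.getLogger(__name__)

LEGACY_EMAIL_DELEGATE_WARNING = (
    "account_threepid_delegates.email is deprecated (since 1.64.0rc1); "
    "configure an SMTP server under `email` so Synapse can send "
    "validation mails itself."
)


def check_threepid_delegates(config: dict) -> None:
    delegates = config.get("account_threepid_delegates") or {}
    if "email" in delegates:
        # Before this revert, Synapse raised a ConfigError here and refused
        # to start; now it only warns and keeps delegating email validation
        # to the configured identity server.
        logger.warning(LEGACY_EMAIL_DELEGATE_WARNING)


# Example: logs a warning but does not raise.
logging.basicConfig(level=logging.WARNING)
check_threepid_delegates(
    {"account_threepid_delegates": {"email": "https://id.example.com"}}
)
```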
diff --git a/docs/upgrade.md b/docs/upgrade.md index fadb8e7ffb..73ed209975 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -91,19 +91,6 @@ process, for example: # Upgrading to v1.64.0 -## Delegation of email validation no longer supported - -As of this version, Synapse no longer allows the tasks of verifying email address -ownership, and password reset confirmation, to be delegated to an identity server. - -To continue to allow users to add email addresses to their homeserver accounts, -and perform password resets, make sure that Synapse is configured with a -working email server in the `email` configuration section (including, at a -minimum, a `notif_from` setting.) - -Specifying an `email` setting under `account_threepid_delegates` will now cause -an error at startup. - ## Changes to the event replication streams Synapse now includes a flag indicating if an event is an outlier when diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index eefcc7829d..d8616f7dbd 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1,11 +1,11 @@ # Configuring Synapse -This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified -through the many configuration settings documented here — each config option is explained, +This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified +through the many configuration settings documented here — each config option is explained, including what the default is, how to change the default and what sort of behaviour the setting governs. -Also included is an example configuration for each setting. If you don't want to spend a lot of time +Also included is an example configuration for each setting. If you don't want to spend a lot of time thinking about options, the config as generated sets sensible defaults for all values. Do note however that the -database defaults to SQLite, which is not recommended for production usage. You can read more on this subject +database defaults to SQLite, which is not recommended for production usage. You can read more on this subject [here](../../setup/installation.md#using-postgresql). ## Config Conventions @@ -26,17 +26,17 @@ messages from the database after 5 minutes, rather than 5 months. In addition, configuration options referring to size use the following suffixes: * `M` = MiB, or 1,048,576 bytes -* `K` = KiB, or 1024 bytes +* `K` = KiB, or 1024 bytes For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes -for a user avatar. +for a user avatar. -### YAML +### YAML The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules apply if you want your config file to be read properly. A few helpful things to know: -* `#` before any option in the config will comment out that setting and either a default (if available) will +* `#` before any option in the config will comment out that setting and either a default (if available) will be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and - applied, but in example #2 the setting will not be read and a default will be applied. + applied, but in example #2 the setting will not be read and a default will be applied. Example #1: ```yaml @@ -50,13 +50,13 @@ apply if you want your config file to be read properly. 
A few helpful things to will determine whether a given setting is read as part of another setting, or considered on its own. Thus, in example #1, the `enabled` setting is read as a sub-option of the `presence` setting, and will be properly applied. - + However, the lack of indentation before the `enabled` setting in example #2 means that when reading the config, Synapse will consider both `presence` and `enabled` as different settings. In this case, `presence` has no value, and thus a default applied, and `enabled` is an option that Synapse doesn't recognize and thus ignores. - - Example #1: + + Example #1: ```yaml presence: enabled: false @@ -66,11 +66,11 @@ apply if you want your config file to be read properly. A few helpful things to presence: enabled: false ``` - In this manual, all top-level settings (ones with no indentation) are identified - at the beginning of their section (i.e. "### `example_setting`") and - the sub-options, if any, are identified and listed in the body of the section. + In this manual, all top-level settings (ones with no indentation) are identified + at the beginning of their section (i.e. "### `example_setting`") and + the sub-options, if any, are identified and listed in the body of the section. In addition, each setting has an example of its usage, with the proper indentation - shown. + shown. ## Contents [Modules](#modules) @@ -126,7 +126,7 @@ documentation on how to configure or create custom modules for Synapse. --- ### `modules` -Use the `module` sub-option to add modules under this option to extend functionality. +Use the `module` sub-option to add modules under this option to extend functionality. The `module` setting then has a sub-option, `config`, which can be used to define some configuration for the `module`. @@ -166,11 +166,11 @@ The `server_name` cannot be changed later so it is important to configure this correctly before you start Synapse. It should be all lowercase and may contain an explicit port. -There is no default for this option. - +There is no default for this option. + Example configuration #1: ```yaml -server_name: matrix.org +server_name: matrix.org ``` Example configuration #2: ```yaml @@ -188,7 +188,7 @@ pid_file: DATADIR/homeserver.pid --- ### `web_client_location` -The absolute URL to the web client which `/` will redirect to. Defaults to none. +The absolute URL to the web client which `/` will redirect to. Defaults to none. Example configuration: ```yaml @@ -217,7 +217,7 @@ By default, other servers will try to reach our server on port 8448, which can be inconvenient in some environments. Provided `https:///` on port 443 is routed to Synapse, this -option configures Synapse to serve a file at `https:///.well-known/matrix/server`. +option configures Synapse to serve a file at `https:///.well-known/matrix/server`. This will tell other servers to send traffic to port 443 instead. This option currently defaults to false. @@ -235,7 +235,7 @@ serve_server_wellknown: true This option allows server runners to add arbitrary key-value pairs to the [client-facing `.well-known` response](https://spec.matrix.org/latest/client-server-api/#well-known-uri). Note that the `public_baseurl` config option must be provided for Synapse to serve a response to `/.well-known/matrix/client` at all. -If this option is provided, it parses the given yaml to json and +If this option is provided, it parses the given yaml to json and serves it on `/.well-known/matrix/client` endpoint alongside the standard properties. 
@@ -243,16 +243,16 @@ alongside the standard properties. Example configuration: ```yaml -extra_well_known_client_content : +extra_well_known_client_content : option1: value1 option2: value2 ``` --- ### `soft_file_limit` - + Set the soft limit on the number of file descriptors synapse can use. Zero is used to indicate synapse should set the soft limit to the hard limit. -Defaults to 0. +Defaults to 0. Example configuration: ```yaml @@ -262,8 +262,8 @@ soft_file_limit: 3 ### `presence` Presence tracking allows users to see the state (e.g online/offline) -of other local and remote users. Set the `enabled` sub-option to false to -disable presence tracking on this homeserver. Defaults to true. +of other local and remote users. Set the `enabled` sub-option to false to +disable presence tracking on this homeserver. Defaults to true. This option replaces the previous top-level 'use_presence' option. Example configuration: @@ -274,8 +274,8 @@ presence: --- ### `require_auth_for_profile_requests` -Whether to require authentication to retrieve profile data (avatars, display names) of other -users through the client API. Defaults to false. Note that profile data is also available +Whether to require authentication to retrieve profile data (avatars, display names) of other +users through the client API. Defaults to false. Note that profile data is also available via the federation API, unless `allow_profile_lookup_over_federation` is set to false. Example configuration: @@ -286,11 +286,11 @@ require_auth_for_profile_requests: true ### `limit_profile_requests_to_users_who_share_rooms` Use this option to require a user to share a room with another user in order -to retrieve their profile information. Only checked on Client-Server +to retrieve their profile information. Only checked on Client-Server requests. Profile requests from other servers should be checked by the requesting server. Defaults to false. -Example configuration: +Example configuration: ```yaml limit_profile_requests_to_users_who_share_rooms: true ``` @@ -336,7 +336,7 @@ The default room version for newly created rooms on this server. Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions) For example, for room version 1, `default_room_version` should be set -to "1". +to "1". Currently defaults to "9". @@ -348,7 +348,7 @@ default_room_version: "8" ### `gc_thresholds` The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined. -Defaults to none. +Defaults to none. Example configuration: ```yaml @@ -358,7 +358,7 @@ gc_thresholds: [700, 10, 10] ### `gc_min_interval` The minimum time in seconds between each GC for a generation, regardless of -the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` +the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive generation 0 GCs, etc. Defaults to `[1s, 10s, 30s]`. @@ -400,7 +400,7 @@ enable_search: false ``` --- ### `ip_range_blacklist` - + This option prevents outgoing requests from being sent to the specified blacklisted IP address CIDR ranges. If this option is not specified then it defaults to private IP address ranges (see the example below). @@ -463,13 +463,13 @@ configuration. Sub-options for each listener include: -* `port`: the TCP port to bind to. +* `port`: the TCP port to bind to. * `bind_addresses`: a list of local addresses to listen on. The default is 'all local interfaces'. 
* `type`: the type of listener. Normally `http`, but other valid options are: - + * `manhole`: (see the docs [here](../../manhole.md)), * `metrics`: (see the docs [here](../../metrics-howto.md)), @@ -585,7 +585,7 @@ forward extremities reaches a given threshold, Synapse will send an `org.matrix.dummy_event` event, which will reduce the forward extremities in the room. -This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent. +This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent. The default value is 10. Example configuration: @@ -612,7 +612,7 @@ Useful options for Synapse admins. ### `admin_contact` -How to reach the server admin, used in `ResourceLimitError`. Defaults to none. +How to reach the server admin, used in `ResourceLimitError`. Defaults to none. Example configuration: ```yaml @@ -622,7 +622,7 @@ admin_contact: 'mailto:admin@server.com' ### `hs_disabled` and `hs_disabled_message` Blocks users from connecting to the homeserver and provides a human-readable reason -why the connection was blocked. Defaults to false. +why the connection was blocked. Defaults to false. Example configuration: ```yaml @@ -632,20 +632,20 @@ hs_disabled_message: 'Reason for why the HS is blocked' --- ### `limit_usage_by_mau` -This option disables/enables monthly active user blocking. Used in cases where the admin or -server owner wants to limit to the number of monthly active users. When enabled and a limit is +This option disables/enables monthly active user blocking. Used in cases where the admin or +server owner wants to limit to the number of monthly active users. When enabled and a limit is reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. Defaults to false. If this is enabled, a value for `max_mau_value` must also be set. Example configuration: ```yaml -limit_usage_by_mau: true +limit_usage_by_mau: true ``` --- ### `max_mau_value` -This option sets the hard limit of monthly active users above which the server will start -blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. +This option sets the hard limit of monthly active users above which the server will start +blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. Example configuration: ```yaml @@ -658,7 +658,7 @@ The option `mau_trial_days` is a means to add a grace period for active users. I means that users must be active for the specified number of days before they can be considered active and guards against the case where lots of users sign up in a short space of time never to return after their initial -session. Defaults to 0. +session. Defaults to 0. Example configuration: ```yaml @@ -674,7 +674,7 @@ use the value of `mau_trial_days` instead. Example configuration: ```yaml -mau_appservice_trial_days: +mau_appservice_trial_days: my_appservice_id: 3 another_appservice_id: 6 ``` @@ -696,7 +696,7 @@ mau_limit_alerting: false If enabled, the metrics for the number of monthly active users will be populated, however no one will be limited based on these numbers. If `limit_usage_by_mau` -is true, this is implied to be true. Defaults to false. +is true, this is implied to be true. Defaults to false. Example configuration: ```yaml @@ -720,7 +720,7 @@ mau_limit_reserved_threepids: ### `server_context` This option is used by phonehome stats to group together related servers. -Defaults to none. +Defaults to none. 
Example configuration: ```yaml @@ -736,11 +736,11 @@ resource-constrained. Options for this setting include: * `enabled`: whether this check is enabled. Defaults to false. * `complexity`: the limit above which rooms cannot be joined. The default is 1.0. * `complexity_error`: override the error which is returned when the room is too complex with a - custom message. + custom message. * `admins_can_join`: allow server admins to join complex rooms. Default is false. Room complexity is an arbitrary measure based on factors such as the number of -users in the room. +users in the room. Example configuration: ```yaml @@ -775,7 +775,7 @@ allow_per_room_profiles: false ### `max_avatar_size` The largest permissible file size in bytes for a user avatar. Defaults to no restriction. -Use M for MB and K for KB. +Use M for MB and K for KB. Note that user avatar changes will not work if this is set without using Synapse's media repository. @@ -808,7 +808,7 @@ Example configuration: redaction_retention_period: 28d ``` --- -### `user_ips_max_age` +### `user_ips_max_age` How long to track users' last seen time and IPs in the database. @@ -823,7 +823,7 @@ user_ips_max_age: 14d Inhibits the `/requestToken` endpoints from returning an error that might leak information about whether an e-mail address is in use or not on this -homeserver. Defaults to false. +homeserver. Defaults to false. Note that for some endpoints the error situation is the e-mail already being used, and for others the error is entering the e-mail being unused. If this option is enabled, instead of returning an error, these endpoints will @@ -859,9 +859,9 @@ next_link_domain_whitelist: ["matrix.org"] ### `templates` and `custom_template_directory` These options define templates to use when generating email or HTML page contents. -The `custom_template_directory` determines which directory Synapse will try to +The `custom_template_directory` determines which directory Synapse will try to find template files in to use to generate email or HTML page contents. -If not set, or a file is not found within the template directory, a default +If not set, or a file is not found within the template directory, a default template from within the Synapse package will be used. See [here](../../templates.md) for more @@ -884,26 +884,26 @@ the `allowed_lifetime_min` and `allowed_lifetime_max` config options. If this feature is enabled, Synapse will regularly look for and purge events which are older than the room's maximum retention period. Synapse will also -filter events received over federation so that events that should have been -purged are ignored and not stored again. +filter events received over federation so that events that should have been +purged are ignored and not stored again. The message retention policies feature is disabled by default. This setting has the following sub-options: * `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the - 'm.room.retention' state event. This option is further specified by the - `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the - value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. + 'm.room.retention' state event. This option is further specified by the + `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the + value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. 
-* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If - set, and the state of a room contains a `m.room.retention` event in its state +* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If + set, and the state of a room contains a `m.room.retention` event in its state which contains a `min_lifetime` or a `max_lifetime` that's out of these bounds, Synapse will cap the room's policy to these limits when running purge jobs. * `purge_jobs` and the associated `shortest_max_lifetime` and `longest_max_lifetime` sub-options: Server admins can define the settings of the background jobs purging the events whose lifetime has expired under the `purge_jobs` section. - + If no configuration is provided for this option, a single job will be set up to delete expired events in every room daily. @@ -915,7 +915,7 @@ This setting has the following sub-options: range are optional, e.g. a job with no `shortest_max_lifetime` and a `longest_max_lifetime` of '3d' will handle every room with a retention policy whose `max_lifetime` is lower than or equal to three days. - + The rationale for this per-job configuration is that some rooms might have a retention policy with a low `max_lifetime`, where history needs to be purged of outdated messages on a more frequent basis than for the rest of the rooms @@ -944,7 +944,7 @@ retention: - longest_max_lifetime: 3d interval: 12h - shortest_max_lifetime: 3d - interval: 1d + interval: 1d ``` --- ## TLS ## @@ -956,11 +956,11 @@ Options related to TLS. This option specifies a PEM-encoded X509 certificate for TLS. This certificate, as of Synapse 1.0, will need to be a valid and verifiable -certificate, signed by a recognised Certificate Authority. Defaults to none. +certificate, signed by a recognised Certificate Authority. Defaults to none. Be sure to use a `.pem` file that includes the full certificate chain including any intermediate certificates (for instance, if using certbot, use -`fullchain.pem` as your certificate, not `cert.pem`). +`fullchain.pem` as your certificate, not `cert.pem`). Example configuration: ```yaml @@ -969,7 +969,7 @@ tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt" --- ### `tls_private_key_path` -PEM-encoded private key for TLS. Defaults to none. +PEM-encoded private key for TLS. Defaults to none. Example configuration: ```yaml @@ -1126,31 +1126,31 @@ Caching can be configured through the following sub-options: This can also be set by the `SYNAPSE_CACHE_FACTOR` environment variable. Setting by environment variable takes priority over setting through the config file. - + Defaults to 0.5, which will halve the size of all caches. * `per_cache_factors`: A dictionary of cache name to cache factor for that individual cache. Overrides the global cache factor for a given cache. - + These can also be set through environment variables comprised of `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital letters and underscores. Setting by environment variable takes priority over setting through the config file. Ex. `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0` - + Some caches have '*' and other characters that are not alphanumeric or underscores. These caches can be named with or without the special characters stripped. For example, to specify the cache factor for `*stateGroupCache*` via an environment variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. - + * `expire_caches`: Controls whether cache entries are evicted after a specified time period. Defaults to true. 
Set to false to disable this feature. Note that never expiring - caches may result in excessive memory usage. + caches may result in excessive memory usage. * `cache_entry_ttl`: If `expire_caches` is enabled, this flag controls how long an entry can be in a cache without having been accessed before being evicted. - Defaults to 30m. + Defaults to 30m. * `sync_response_cache_duration`: Controls how long the results of a /sync request are cached for after a successful response is returned. A higher duration can help clients @@ -1161,8 +1161,8 @@ Caching can be configured through the following sub-options: *Changed in Synapse 1.62.0*: The default was changed from 0 to 2m. * `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and - `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory - usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) + `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory + usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) to utilize this option, and all three of the options must be specified for this feature to work. This option defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided. @@ -1175,7 +1175,7 @@ Caching can be configured through the following sub-options: for this option. * `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches - from being emptied while Synapse is evicting due to memory. There is no default value for this option. + from being emptied while Synapse is evicting due to memory. There is no default value for this option. Example configuration: ```yaml @@ -1199,7 +1199,7 @@ The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) kill -HUP [PID_OF_SYNAPSE_PROCESS] ``` -If you are running multiple workers, you must individually update the worker +If you are running multiple workers, you must individually update the worker config file and send this signal to each worker process. If you're using the [example systemd service](https://github.com/matrix-org/synapse/blob/develop/contrib/systemd/matrix-synapse.service) @@ -1219,7 +1219,7 @@ its data. Associated sub-options: * `name`: this option specifies the database engine to use: either `sqlite3` (for SQLite) - or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. + or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. * `txn_limit` gives the maximum number of transactions to run per connection before reconnecting. Defaults to 0, which means no limit. @@ -1355,7 +1355,7 @@ databases: ``` --- ## Logging ## -Config options related to logging. +Config options related to logging. --- ### `log_config` @@ -1368,7 +1368,7 @@ log_config: "CONFDIR/SERVERNAME.log.config" ``` --- ## Ratelimiting ## -Options related to ratelimiting in Synapse. +Options related to ratelimiting in Synapse. 
Each ratelimiting configuration is made of two parameters: - `per_second`: number of requests a client can send per second. @@ -1378,7 +1378,7 @@ Each ratelimiting configuration is made of two parameters: Ratelimiting settings for client messaging. - + This is a ratelimiting option for messages that ratelimits sending based on the account the client is using. It defaults to: `per_second: 0.2`, `burst_count: 10`. @@ -1392,7 +1392,7 @@ rc_message: ### `rc_registration` This option ratelimits registration requests based on the client's IP address. -It defaults to `per_second: 0.17`, `burst_count: 3`. +It defaults to `per_second: 0.17`, `burst_count: 3`. Example configuration: ```yaml @@ -1403,7 +1403,7 @@ rc_registration: --- ### `rc_registration_token_validity` -This option checks the validity of registration tokens that ratelimits requests based on +This option checks the validity of registration tokens that ratelimits requests based on the client's IP address. Defaults to `per_second: 0.1`, `burst_count: 5`. @@ -1412,18 +1412,18 @@ Example configuration: rc_registration_token_validity: per_second: 0.3 burst_count: 6 -``` +``` --- ### `rc_login` This option specifies several limits for login: * `address` ratelimits login requests based on the client's IP address. Defaults to `per_second: 0.17`, `burst_count: 3`. - + * `account` ratelimits login requests based on the account the client is attempting to log into. Defaults to `per_second: 0.17`, `burst_count: 3`. - + * `failed_attempts` ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`. @@ -1444,9 +1444,9 @@ rc_login: --- ### `rc_admin_redaction` -This option sets ratelimiting redactions by room admins. If this is not explicitly +This option sets ratelimiting redactions by room admins. If this is not explicitly set then it uses the same ratelimiting as per `rc_message`. This is useful -to allow room admins to deal with abuse quickly. +to allow room admins to deal with abuse quickly. Example configuration: ```yaml @@ -1459,12 +1459,12 @@ rc_admin_redaction: This option allows for ratelimiting number of rooms a user can join. This setting has the following sub-options: -* `local`: ratelimits when users are joining rooms the server is already in. Defaults to `per_second: 0.1`, `burst_count: 10`. * `remote`: ratelimits when users are trying to join rooms not on the server (which can be more computationally expensive than restricting locally). Defaults to - `per_second: 0.01`, `burst_count: 10` + `per_second: 0.01`, `burst_count: 10` Example configuration: ```yaml @@ -1510,9 +1510,9 @@ rc_3pid_validation: --- ### `rc_invites` -This option sets ratelimiting how often invites can be sent in a room or to a +This option sets ratelimiting how often invites can be sent in a room or to a specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and -`per_user` defaults to `per_second: 0.003`, `burst_count: 5`. +`per_user` defaults to `per_second: 0.003`, `burst_count: 5`. Client requests that invite user(s) when [creating a room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom) @@ -1562,7 +1562,7 @@ rc_third_party_invite: --- ### `rc_federation` -Defines limits on federation requests.
The `rc_federation` configuration has the following sub-options: * `window_size`: window size in milliseconds. Defaults to 1000. @@ -1591,7 +1591,7 @@ Sets outgoing federation transaction frequency for sending read-receipts, per-room. If we end up trying to send out more read-receipts, they will get buffered up -into fewer transactions. Defaults to 50. +into fewer transactions. Defaults to 50. Example configuration: ```yaml @@ -1602,9 +1602,9 @@ federation_rr_transactions_per_room_per_second: 40 Config options related to Synapse's media store. --- -### `enable_media_repo` +### `enable_media_repo` -Enable the media store service in the Synapse master. Defaults to true. +Enable the media store service in the Synapse master. Defaults to true. Set to false if you are using a separate media store worker. Example configuration: @@ -1629,7 +1629,7 @@ locations. Defaults to none. Associated sub-options are: * `store_local`: whether to store newly uploaded local files * `store_remote`: whether to store newly downloaded local files * `store_synchronous`: whether to wait for successful storage for local uploads -* `config`: sets a path to the resource through the `directory` option +* `config`: sets a path to the resource through the `directory` option Example configuration: ```yaml @@ -1648,7 +1648,7 @@ The largest allowed upload size in bytes. If you are using a reverse proxy you may also need to set this value in your reverse proxy's config. Defaults to 50M. Notably Nginx has a small max body size by default. -See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. +See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. Example configuration: ```yaml @@ -1670,14 +1670,14 @@ Whether to generate new thumbnails on the fly to precisely match the resolution requested by the client. If true then whenever a new resolution is requested by the client the server will generate a new thumbnail. If false the server will pick a thumbnail -from a precalculated list. Defaults to false. +from a precalculated list. Defaults to false. Example configuration: ```yaml dynamic_thumbnails: true ``` --- -### `thumbnail_sizes` +### `thumbnail_sizes` List of thumbnails to precalculate when an image is uploaded. Associated sub-options are: * `width` @@ -1795,7 +1795,7 @@ This option sets a list of IP address CIDR ranges that the URL preview spider is to access even if they are specified in `url_preview_ip_range_blacklist`. This is useful for specifying exceptions to wide-ranging blacklisted target IP ranges - e.g. for enabling URL previews for a specific private -website only visible in your network. Defaults to none. +website only visible in your network. Defaults to none. Example configuration: ```yaml @@ -1813,7 +1813,7 @@ This is more useful if you know there is an entire shape of URL that you know that will never want synapse to try to spider. Each list entry is a dictionary of url component attributes as returned -by urlparse.urlsplit as applied to the absolute form of the URL. See +by urlparse.urlsplit as applied to the absolute form of the URL. See [here](https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit) for more information. Some examples are: @@ -1888,8 +1888,8 @@ Example configuration: oEmbed allows for easier embedding content from a website. It can be used for generating URLs previews of services which support it. A default list of oEmbed providers is included with Synapse. 
Set `disable_default_providers` to true to disable using -these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each -should be in the form of providers.json). By default this list is empty. +these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each +should be in the form of providers.json). By default this list is empty. Example configuration: ```yaml @@ -1906,7 +1906,7 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. --- ### `recaptcha_public_key` -This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is enabled. Example configuration: @@ -1914,9 +1914,9 @@ Example configuration: recaptcha_public_key: "YOUR_PUBLIC_KEY" ``` --- -### `recaptcha_private_key` +### `recaptcha_private_key` -This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is enabled. Example configuration: @@ -1927,7 +1927,7 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY" ### `enable_registration_captcha` Set to true to enable ReCaptcha checks when registering, preventing signup -unless a captcha is answered. Requires a valid ReCaptcha public/private key. +unless a captcha is answered. Requires a valid ReCaptcha public/private key. Defaults to false. Example configuration: @@ -2005,7 +2005,7 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat ### `enable_registration` Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration, -you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration +you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration without any verification, you must also set `enable_registration_without_verification` to true. Example configuration: @@ -2029,7 +2029,7 @@ Time that a user's session remains valid for, after they log in. Note that this is not currently compatible with guest logins. -Note also that this is calculated at login time: changes are not applied retrospectively to users who have already +Note also that this is calculated at login time: changes are not applied retrospectively to users who have already logged in. By default, this is infinite. @@ -2047,7 +2047,7 @@ For more information about refresh tokens, please see the [manual](user_authenti Note that this only applies to clients which advertise support for refresh tokens. -Note also that this is calculated at login time and refresh time: changes are not applied to +Note also that this is calculated at login time and refresh time: changes are not applied to existing sessions until they are refreshed. By default, this is 5 minutes. @@ -2145,7 +2145,7 @@ Require users to submit a token during registration. Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). Note that `enable_registration` must be set to true. Disabling this option will not delete any tokens previously generated. -Defaults to false. Set to true to enable. +Defaults to false. Set to true to enable. Example configuration: ```yaml @@ -2215,7 +2215,7 @@ their account. 
by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest).) -*Updated in Synapse 1.64.0*: No longer accepts an `email` option. +*Updated in Synapse 1.64.0*: The `email` option is deprecated. Example configuration: ```yaml @@ -2270,7 +2270,7 @@ By default, any room aliases included in this list will be created as a publicly joinable room when the first user registers for the homeserver. If the room already exists, make certain it is a publicly joinable room, i.e. the join rule of the room must be set to 'public'. You can find more options -relating to auto-joining rooms below. +relating to auto-joining rooms below. Example configuration: ```yaml @@ -2324,9 +2324,9 @@ effect if `autocreate_auto_join_rooms` is true. Possible values for this option are: * "public_chat": the room is joinable by anyone, including federated servers if `autocreate_auto_join_rooms_federated` is true (the default). -* "private_chat": an invitation is required to join these rooms. +* "private_chat": an invitation is required to join these rooms. * "trusted_private_chat": an invitation is required to join this room and the invitee is - assigned a power level of 100 upon joining the room. + assigned a power level of 100 upon joining the room. If a value of "private_chat" or "trusted_private_chat" is used then `auto_join_mxid_localpart` must also be configured. @@ -2363,7 +2363,7 @@ auto_join_mxid_localpart: system ``` --- ### `auto_join_rooms_for_guests` - + When `auto_join_rooms` is specified, setting this flag to false prevents guest accounts from being automatically joined to the rooms. @@ -2375,7 +2375,7 @@ auto_join_rooms_for_guests: false ``` --- ### `inhibit_user_in_use_error` - + Whether to inhibit errors raised when registering a new account if the user ID already exists. If turned on, requests to `/register/available` will always show a user ID as available, and Synapse won't raise an error when starting @@ -2395,7 +2395,7 @@ Config options related to metrics. --- ### `enable_metrics` -Set to true to enable collection and rendering of performance metrics. +Set to true to enable collection and rendering of performance metrics. Defaults to false. Example configuration: @@ -2406,11 +2406,11 @@ enable_metrics: true ### `sentry` Use this option to enable sentry integration. Provide the DSN assigned to you by sentry -with the `dsn` setting. +with the `dsn` setting. NOTE: While attempts are made to ensure that the logs don't contain any sensitive information, this cannot be guaranteed. By enabling -this option the sentry server may therefore receive sensitive +this option the sentry server may therefore receive sensitive information, and it in turn may then disseminate sensitive information through insecure notification channels if so configured. @@ -2424,7 +2424,7 @@ sentry: Flags to enable Prometheus metrics which are not suitable to be enabled by default, either for performance reasons or limited use. -Currently the only option is `known_servers`, which publishes +Currently the only option is `known_servers`, which publishes `synapse_federation_known_servers`, a gauge of the number of servers this homeserver knows about, including itself. May cause performance problems on large homeservers. @@ -2468,7 +2468,7 @@ Config settings related to the client/server API ### `room_prejoin_state:` Controls for the state that is shared with users who receive an invite -to a room. By default, the following state event types are shared with users who +to a room. 
By default, the following state event types are shared with users who receive invites to the room: - m.room.join_rules - m.room.canonical_alias @@ -2479,7 +2479,7 @@ receive invites to the room: - m.room.topic To change the default behavior, use the following sub-options: -* `disable_default_event_types`: set to true to disable the above defaults. If this +* `disable_default_event_types`: set to true to disable the above defaults. If this is enabled, only the event types listed in `additional_event_types` are shared. Defaults to false. * `additional_event_types`: Additional state event types to share with users when they are invited @@ -2569,7 +2569,7 @@ Example configuration: ```yaml signing_key_path: "CONFDIR/SERVERNAME.signing.key" ``` ---- +--- ### `old_signing_keys` The keys that the server used to sign messages with but won't use @@ -2621,7 +2621,7 @@ Options for each entry in the list include: If specified, we will check that the response is signed by at least one of the given keys. * `accept_keys_insecurely`: a boolean. Normally, if `verify_keys` is unset, - and `federation_verify_certificates` is not `true`, synapse will refuse + and `federation_verify_certificates` is not `true`, synapse will refuse to start, because this would allow anyone who can spoof DNS responses to masquerade as the trusted key server. If you know what you are doing and are sure that your network environment provides a secure connection @@ -2699,15 +2699,15 @@ This setting has the following sub-options: * `service`: By default, the user has to go to our login page first. If you'd like to allow IdP-initiated login, set `allow_unsolicited` to true under `sp` in the `service` section. -* `config_path`: specify a separate pysaml2 configuration file thusly: +* `config_path`: specify a separate pysaml2 configuration file thusly: `config_path: "CONFDIR/sp_conf.py"` * `saml_session_lifetime`: The lifetime of a SAML session. This defines how long a user has to complete the authentication process, if `allow_unsolicited` is unset. The default is 15 minutes. -* `user_mapping_provider`: Using this option, an external module can be provided as a - custom solution to mapping attributes returned from a saml provider onto a matrix user. The +* `user_mapping_provider`: Using this option, an external module can be provided as a + custom solution to mapping attributes returned from a saml provider onto a matrix user. The `user_mapping_provider` has the following attributes: - * `module`: The custom module's class. - * `config`: Custom configuration values for the module. Use the values provided in the + * `module`: The custom module's class. + * `config`: Custom configuration values for the module. Use the values provided in the example if you are using the built-in user_mapping_provider, or provide your own config values for a custom class if you are using one. This section will be passed as a Python dictionary to the module's `parse_config` method. The built-in provider takes the following two @@ -2724,7 +2724,7 @@ This setting has the following sub-options: MXID was always calculated dynamically rather than stored in a table. For backwards- compatibility, we will look for `user_ids` matching such a pattern before creating a new account. This setting controls the SAML attribute which will be used for this backwards-compatibility lookup. Typically it should be 'uid', but if the attribute maps are changed, it may be necessary to change it. - The default is 'uid'. + The default is 'uid'. 
* `attribute_requirements`: It is possible to configure Synapse to only allow
  logins if SAML attributes match particular values. The requirements can be listed under
  `attribute_requirements` as shown in the example. All of the listed attributes must
@@ -2732,7 +2732,7 @@ This setting has the following sub-options:
* `idp_entityid`: If the metadata XML contains multiple IdP entities then the `idp_entityid`
  option must be set to the entity to redirect users to. Most deployments only have a single IdP entity and so should omit this option.
-
+
Once SAML support is enabled, a metadata file will be exposed at
`https://<server>:<port>/_synapse/client/saml2/metadata.xml`, which you may be able to
@@ -2793,16 +2793,16 @@ saml2_config:
       sur_name: "the Sysadmin"
       email_address: ["admin@example.com"]
       contact_type: technical
-
+
   saml_session_lifetime: 5m
-
+
   user_mapping_provider:
-    # Below options are intended for the built-in provider, they should be
-    # changed if using a custom module.
+    # Below options are intended for the built-in provider, they should be
+    # changed if using a custom module.
    config:
      mxid_source_attribute: displayName
      mxid_mapping: dotreplace
-
+
  grandfathered_mxid_source_attribute: upn

  attribute_requirements:
@@ -2930,7 +2930,7 @@ Options for each entry include:
* `localpart_template`: Jinja2 template for the localpart of the MXID.
  If this is not set, the user will be prompted to choose their
-  own username (see the documentation for the `sso_auth_account_details.html`
+  own username (see the documentation for the `sso_auth_account_details.html`
  template). This template can use the `localpart_from_email` filter.

* `confirm_localpart`: Whether to prompt the user to validate (or
@@ -2943,7 +2943,7 @@ Options for each entry include:
* `email_template`: Jinja2 template for the email address of the user.
  If unset, no email address will be added to the account.
-
+
* `extra_attributes`: a map of Jinja2 templates for extra attributes to send back to the client during login. Note that these are non-standard and clients will ignore them without modifications.
@@ -2953,7 +2953,7 @@ Options for each entry include:
  in the ID Token.

-It is possible to configure Synapse to only allow logins if certain attributes
+It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
`attribute_requirements` as shown here:
```yaml
@@ -2968,7 +2968,7 @@ userinfo by expanding the `scopes` section of the OIDC config to retrieve
additional information from the OIDC provider.

If the OIDC claim is a list, then the attribute must match any value in the list.
-Otherwise, it must exactly match the value of the claim. Using the example
+Otherwise, it must exactly match the value of the claim. Using the example
above, the `family_name` claim MUST be "Stephensson", but the `groups` claim MUST contain "admin".
@@ -3033,7 +3033,7 @@ cas_config:

Additional settings to use with single-sign on systems such as OpenID Connect, SAML2 and CAS.

-Server admins can configure custom templates for pages related to SSO. See
+Server admins can configure custom templates for pages related to SSO. See
[here](../../templates.md) for more information.

Options include:
@@ -3049,7 +3049,7 @@ Options include:
  required login flows) is whitelisted in addition to any URLs in this list.

  By default, this list contains only the login fallback page.
* `update_profile_information`: Use this setting to keep a user's profile fields in sync with information from - the identity provider. Currently only syncing the displayname is supported. Fields + the identity provider. Currently only syncing the displayname is supported. Fields are checked on every SSO login, and are updated if necessary. Note that enabling this option will override user profile information, regardless of whether users have opted-out of syncing that @@ -3093,7 +3093,7 @@ Additional sub-options for this setting include: Required if `enabled` is set to true. * `subject_claim`: Name of the claim containing a unique identifier for the user. Optional, defaults to `sub`. -* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the +* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the "iss" claim will be required and validated for all JSON web tokens. * `audiences`: A list of audiences to validate the "aud" claim against. Optional. If provided the "aud" claim will be required and validated for all JSON web tokens. @@ -3103,7 +3103,7 @@ Additional sub-options for this setting include: Example configuration: ```yaml jwt_config: - enabled: true + enabled: true secret: "provided-by-your-issuer" algorithm: "provided-by-your-issuer" subject_claim: "name_of_claim" @@ -3114,7 +3114,7 @@ jwt_config: --- ### `password_config` -Use this setting to enable password-based logins. +Use this setting to enable password-based logins. This setting has the following sub-options: * `enabled`: Defaults to true. @@ -3123,10 +3123,10 @@ This setting has the following sub-options: to log in and reauthenticate, whilst preventing new users from setting passwords. * `localdb_enabled`: Set to false to disable authentication against the local password database. This is ignored if `enabled` is false, and is only useful - if you have other `password_providers`. Defaults to true. + if you have other `password_providers`. Defaults to true. * `pepper`: Set the value here to a secret random string for extra security. DO NOT CHANGE THIS AFTER INITIAL SETUP! -* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. +* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows: * `enabled`: Defaults to false. Set to true to enable. * `minimum_length`: Minimum accepted length for a password. Defaults to 0. @@ -3138,7 +3138,7 @@ This setting has the following sub-options: Defaults to false. * `require_uppercase`: Whether a password must contain at least one uppercase letter. Defaults to false. - + Example configuration: ```yaml @@ -3160,7 +3160,7 @@ password_config: The amount of time to allow a user-interactive authentication session to be active. -This defaults to 0, meaning the user is queried for their credentials +This defaults to 0, meaning the user is queried for their credentials before every action, but this can be overridden to allow a single validation to be re-used. This weakens the protections afforded by the user-interactive authentication process, by allowing for multiple @@ -3188,7 +3188,7 @@ Server admins can configure custom templates for email content. See This setting has the following sub-options: * `smtp_host`: The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. * `smtp_port`: The port on the mail server for outgoing SMTP. 
Defaults to 465 if `force_tls` is true, else 25.
-
+
  _Changed in Synapse 1.64.0:_ the default port is now aware of `force_tls`.
* `smtp_user` and `smtp_pass`: Username/password for authentication to the SMTP server.
  By default, no authentication is attempted.
@@ -3196,7 +3196,7 @@ This setting has the following sub-options:
  to TLS via STARTTLS. If this option is set to true, TLS is used from the start (Implicit TLS),
  and the option `require_transport_security` is ignored.
  It is recommended to enable this if supported by your mail server.
-
+
  _New in Synapse 1.64.0._
* `require_transport_security`: Set to true to require TLS transport security for SMTP.
  By default, Synapse will connect over plain text, and will then switch to
@@ -3231,8 +3231,8 @@ This setting has the following sub-options:
  message(s) have been sent to, e.g. "My super room". In addition, emails related to account administration can use the '%(server_name)s' placeholder, which will be replaced by the value of the `server_name` setting in your Synapse configuration.
-
-  Here is a list of subjects for notification emails that can be set:
+
+  Here is a list of subjects for notification emails that can be set:
  * `message_from_person_in_room`: Subject to use to notify about one message from one or more user(s) in a room which has a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..."
  * `message_from_person`: Subject to use to notify about one message from one or more user(s) in a
@@ -3241,13 +3241,13 @@ This setting has the following sub-options:
  a room which doesn't have a name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s..."
  * `messages_in_room`: Subject to use to notify about multiple messages in a room which has a name. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room..."
-  * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms.
+  * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms.
    Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room and others..."
  * `messages_from_person_and_others`: Subject to use to notify about multiple messages from multiple persons in multiple rooms. This is similar to the setting above except it's used when
-    the room in which the notification was triggered has no name. Defaults to
+    the room in which the notification was triggered has no name. Defaults to
    "[%(app)s] You have messages on %(app)s from %(person)s and others..."
-  * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name.
+  * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name.
    Defaults to "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..."
  * `invite_from_person`: Subject to use to notify about an invite to a room which doesn't have a name. Defaults to "[%(app)s] %(person)s has invited you to chat on %(app)s..."
@@ -3292,7 +3292,7 @@ Configuration settings related to push notifications
---
### `push`

-This setting defines options for push notifications.
+This setting defines options for push notifications.

This option has a number of sub-options. They are as follows:
* `include_content`: Clients requesting push notifications can either have the body of
@@ -3307,7 +3307,7 @@ This option has a number of sub-options. They are as follows:
  notification saying only that a message arrived and who it came from.
Defaults to true. Set to false to only include the event ID and room ID in push notification payloads.
* `group_unread_count_by_room`: When a push notification is received, an unread count is also sent.
-  This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the
+  This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the
  user has unread messages in. Defaults to true, meaning push clients will see the number of rooms with unread messages in them. Set to false to instead send the number of unread messages.
@@ -3347,7 +3347,7 @@ encryption_enabled_by_default_for_room_type: invite
---
### `user_directory`

-This setting defines options related to the user directory.
+This setting defines options related to the user directory.

This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
@@ -3365,7 +3365,7 @@ This option has the following sub-options:
  Set to true to return search results containing all known users, even if that
  user does not share a room with the requester.
* `prefer_local_users`: Defines whether to prefer local users in search query results.
-  If set to true, local users are more likely to appear above remote users when searching the
+  If set to true, local users are more likely to appear above remote users when searching the
  user directory. Defaults to false.

Example configuration:
@@ -3430,15 +3430,15 @@ user_consent:
### `stats`

Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md)
-for more.
+for more.

* `enabled`: Set to false to disable room and user statistics. Note that doing
  so may cause certain features (such as the room directory) not to work
-  correctly. Defaults to true.
+  correctly. Defaults to true.

Example configuration:
```yaml
-stats:
+stats:
  enabled: false
```
---
@@ -3470,7 +3470,7 @@ server_notices:

Set to false to disable searching the public room list. When disabled,
this blocks searching of local and remote room lists for local and remote
-users by always returning an empty list for all queries. Defaults to true.
+users by always returning an empty list for all queries. Defaults to true.

Example configuration:
```yaml
@@ -3496,7 +3496,7 @@ Options for the rules include:
* `user_id`: Matches against the creator of the alias. Defaults to "*".
* `alias`: Matches against the alias being created. Defaults to "*".
* `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*"
-* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.
+* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.

Example configuration:
```yaml
@@ -3526,7 +3526,7 @@ Options for the rules include:
* `user_id`: Matches against the creator of the alias. Defaults to "*".
* `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*".
* `room_id`: Matches against the room ID being published. Defaults to "*".
-* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.
+* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.

Example configuration:
```yaml
@@ -3578,14 +3578,14 @@ synapse or any other services which support opentracing

Sub-options include:
* `enabled`: whether tracing is enabled. Set to true to enable. Disabled by default.
* `homeserver_whitelist`: The list of homeservers we wish to send and receive span contexts and span baggage.
-  See [here](../../opentracing.md) for more.
+  See [here](../../opentracing.md) for more.
  This is a list of regexes which are matched against the `server_name` of the homeserver. By default, it is empty, so no servers are matched.
* `force_tracing_for_users`: A list of the matrix IDs of users whose requests will always be traced, even if the tracing system would otherwise drop the traces due to probabilistic sampling. By default, the list is empty.
* `jaeger_config`: Jaeger can be configured to sample traces at different rates.
-  All configuration options provided by Jaeger can be set here. Jaeger's configuration is
+  All configuration options provided by Jaeger can be set here. Jaeger's configuration is
  mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/).

Example configuration:
@@ -3613,7 +3613,7 @@ Configuration options related to workers.
### `send_federation`

Controls sending of outbound federation transactions on the main process.
-Set to false if using a federation sender worker. Defaults to true.
+Set to false if using a federation sender worker. Defaults to true.

Example configuration:
```yaml
@@ -3623,12 +3623,12 @@ send_federation: false
### `federation_sender_instances`

It is possible to run multiple federation sender workers, in which case the
-work is balanced across them. Use this setting to list the senders.
+work is balanced across them. Use this setting to list the senders.

This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
started, to ensure that all instances are running with the same config (otherwise
-events may be dropped).
+events may be dropped).

Example configuration:
```yaml
@@ -3639,7 +3639,7 @@ federation_sender_instances:
### `instance_map`

When using workers this should be a map from worker name to the
-HTTP replication listener of the worker, if configured.
+HTTP replication listener of the worker, if configured.

Example configuration:
```yaml
@@ -3688,7 +3688,7 @@ worker_replication_secret: "secret_secret"

Configuration for Redis when using workers. This *must* be enabled when using workers (unless using old style direct TCP configuration).
This setting has the following sub-options:
-* `enabled`: whether to use Redis support. Defaults to false.
+* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to localhost and 6379
* `password`: Optional password if configured on the Redis instance.

@@ -3702,7 +3702,7 @@ redis:
   password:
```
## Background Updates ##
-Configuration settings related to background updates.
+Configuration settings related to background updates.

---
### `background_updates`

Background updates are database updates that are run in the background in batches.
The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to sleep can all be configured. This is helpful to speed up or slow down the updates.
This setting has the following sub-options:
-* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100.
Set a different time to change the default.
* `sleep_enabled`: Whether to sleep between updates. Defaults to true. Set to false to change the default.
* `sleep_duration_ms`: If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000.
@@ -3721,7 +3721,7 @@ This setting has the following sub-options:
* `default_batch_size`: The batch size to use for the first iteration of a new background update. The default is 100. Set a size to change the default.

-Example configuration:
+Example configuration:
```yaml
background_updates:
    background_update_duration_ms: 500
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 6bafa7d3f3..745e704141 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -44,6 +44,7 @@ from synapse.app._base import (
     register_start,
 )
 from synapse.config._base import ConfigError, format_config_error
+from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import ListenerConfig
 from synapse.federation.transport.server import TransportLayerServer
@@ -201,7 +202,7 @@ class SynapseHomeServer(HomeServer):
             }
         )

-        if self.config.email.can_verify_email:
+        if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
             from synapse.rest.synapse.client.password_reset import (
                 PasswordResetSubmitTokenResource,
             )
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 73b469f414..7765c5b454 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -18,6 +18,7 @@
 import email.utils
 import logging
 import os
+from enum import Enum
 from typing import Any

 import attr
@@ -135,22 +136,40 @@ class EmailConfig(Config):

         self.email_enable_notifs = email_config.get("enable_notifs", False)

+        self.threepid_behaviour_email = (
+            # Have Synapse handle the email sending if account_threepid_delegates.email
+            # is not defined
+            # msisdn is currently always remote while Synapse does not support any method of
+            # sending SMS messages
+            ThreepidBehaviour.REMOTE
+            if self.root.registration.account_threepid_delegate_email
+            else ThreepidBehaviour.LOCAL
+        )
+
         if config.get("trust_identity_server_for_password_resets"):
             raise ConfigError(
-                'The config option "trust_identity_server_for_password_resets" '
-                "is no longer supported. Please remove it from the config file."
+                'The config option "trust_identity_server_for_password_resets" has been removed.'
+                " Please consult the configuration manual at docs/usage/configuration/config_documentation.md for "
+                "details and update your config file."
             )

-        # If we have email config settings, assume that we can verify ownership of
-        # email addresses.
- self.can_verify_email = email_config != {} + self.local_threepid_handling_disabled_due_to_email_config = False + if ( + self.threepid_behaviour_email == ThreepidBehaviour.LOCAL + and email_config == {} + ): + # We cannot warn the user this has happened here + # Instead do so when a user attempts to reset their password + self.local_threepid_handling_disabled_due_to_email_config = True + + self.threepid_behaviour_email = ThreepidBehaviour.OFF # Get lifetime of a validation token in milliseconds self.email_validation_token_lifetime = self.parse_duration( email_config.get("validation_token_lifetime", "1h") ) - if self.can_verify_email: + if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL: missing = [] if not self.email_notif_from: missing.append("email.notif_from") @@ -341,3 +360,18 @@ class EmailConfig(Config): "Config option email.invite_client_location must be a http or https URL", path=("email", "invite_client_location"), ) + + +class ThreepidBehaviour(Enum): + """ + Enum to define the behaviour of Synapse with regards to when it contacts an identity + server for 3pid registration and password resets + + REMOTE = use an external server to send tokens + LOCAL = send tokens ourselves + OFF = disable registration via 3pid and password resets + """ + + REMOTE = "remote" + LOCAL = "local" + OFF = "off" diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 685a0423c5..01fb0331bc 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse +import logging from typing import Any, Optional from synapse.api.constants import RoomCreationPreset @@ -20,11 +21,15 @@ from synapse.config._base import Config, ConfigError from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool -NO_EMAIL_DELEGATE_ERROR = """\ -Delegation of email verification to an identity server is no longer supported. To +logger = logging.getLogger(__name__) + +LEGACY_EMAIL_DELEGATE_WARNING = """\ +Delegation of email verification to an identity server is now deprecated. To continue to allow users to add email addresses to their accounts, and use them for password resets, configure Synapse with an SMTP server via the `email` setting, and remove `account_threepid_delegates.email`. + +This will be an error in a future version. 
""" @@ -59,8 +64,9 @@ class RegistrationConfig(Config): account_threepid_delegates = config.get("account_threepid_delegates") or {} if "email" in account_threepid_delegates: - raise ConfigError(NO_EMAIL_DELEGATE_ERROR) - # self.account_threepid_delegate_email = account_threepid_delegates.get("email") + logger.warning(LEGACY_EMAIL_DELEGATE_WARNING) + + self.account_threepid_delegate_email = account_threepid_delegates.get("email") self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9571d461c8..e5afe84df9 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.config.emailconfig import ThreepidBehaviour from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest @@ -415,6 +416,48 @@ class IdentityHandler: return session_id + async def request_email_token( + self, + id_server: str, + email: str, + client_secret: str, + send_attempt: int, + next_link: Optional[str] = None, + ) -> JsonDict: + """ + Request an external server send an email on our behalf for the purposes of threepid + validation. + + Args: + id_server: The identity server to proxy to + email: The email to send the message to + client_secret: The unique client_secret sends by the user + send_attempt: Which attempt this is + next_link: A link to redirect the user to once they submit the token + + Returns: + The json response body from the server + """ + params = { + "email": email, + "client_secret": client_secret, + "send_attempt": send_attempt, + } + if next_link: + params["next_link"] = next_link + + try: + data = await self.http_client.post_json_get_json( + id_server + "/_matrix/identity/api/v1/validate/email/requestToken", + params, + ) + return data + except HttpResponseException as e: + logger.info("Proxied requestToken failed: %r", e) + raise e.to_synapse_error() + except RequestTimedOutError: + raise SynapseError(500, "Timed out contacting identity server") + async def requestMsisdnToken( self, id_server: str, @@ -488,7 +531,18 @@ class IdentityHandler: validation_session = None # Try to validate as email - if self.hs.config.email.can_verify_email: + if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + # Remote emails will only be used if a valid identity server is provided. 
+ assert ( + self.hs.config.registration.account_threepid_delegate_email is not None + ) + + # Ask our delegated email identity server + validation_session = await self.threepid_from_creds( + self.hs.config.registration.account_threepid_delegate_email, + threepid_creds, + ) + elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: # Get a validated session matching these details validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index a744d68c64..05cebb5d4d 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -19,6 +19,7 @@ from twisted.web.client import PartialDownloadError from synapse.api.constants import LoginType from synapse.api.errors import Codes, LoginError, SynapseError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder if TYPE_CHECKING: @@ -152,7 +153,7 @@ class _BaseThreepidAuthChecker: logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,)) - # msisdns are currently always verified via the IS + # msisdns are currently always ThreepidBehaviour.REMOTE if medium == "msisdn": if not self.hs.config.registration.account_threepid_delegate_msisdn: raise SynapseError( @@ -163,7 +164,18 @@ class _BaseThreepidAuthChecker: threepid_creds, ) elif medium == "email": - if self.hs.config.email.can_verify_email: + if ( + self.hs.config.email.threepid_behaviour_email + == ThreepidBehaviour.REMOTE + ): + assert self.hs.config.registration.account_threepid_delegate_email + threepid = await identity_handler.threepid_from_creds( + self.hs.config.registration.account_threepid_delegate_email, + threepid_creds, + ) + elif ( + self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL + ): threepid = None row = await self.store.get_threepid_validation_session( medium, @@ -215,7 +227,10 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec _BaseThreepidAuthChecker.__init__(self, hs) def is_enabled(self) -> bool: - return self.hs.config.email.can_verify_email + return self.hs.config.email.threepid_behaviour_email in ( + ThreepidBehaviour.REMOTE, + ThreepidBehaviour.LOCAL, + ) async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 0cc87a4001..50edc6b7d3 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -28,6 +28,7 @@ from synapse.api.errors import ( SynapseError, ThreepidValidationError, ) +from synapse.config.emailconfig import ThreepidBehaviour from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.http.server import HttpServer, finish_request, respond_with_html from synapse.http.servlet import ( @@ -63,7 +64,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): self.config = hs.config self.identity_handler = hs.get_identity_handler() - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -72,10 +73,11 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.config.email.can_verify_email: - logger.warning( - "User password resets have 
been disabled due to lack of email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "User password resets have been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based password resets have been disabled on this server" ) @@ -127,21 +129,35 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - # Send password reset emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_password_reset_mail, - next_link, - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.registration.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = await self.identity_handler.request_email_token( + self.hs.config.registration.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send password reset emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_password_reset_mail, + next_link, + ) + + # Wrap the session id in a JSON object + ret = {"sid": sid} threepid_send_requests.labels(type="email", reason="password_reset").observe( send_attempt ) - # Wrap the session id in a JSON object - return 200, {"sid": sid} + return 200, ret class PasswordRestServlet(RestServlet): @@ -333,7 +349,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.store = self.hs.get_datastores().main - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -342,10 +358,11 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.config.email.can_verify_email: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "Adding emails have been disabled due to lack of an email config" + ) raise SynapseError( 400, "Adding an email to your account is disabled on this server" ) @@ -396,20 +413,35 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_add_threepid_mail, - next_link, - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.registration.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = await self.identity_handler.request_email_token( + self.hs.config.registration.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send threepid validation emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_add_threepid_mail, + next_link, + ) + + # Wrap the session id in a JSON 
object + ret = {"sid": sid} threepid_send_requests.labels(type="email", reason="add_threepid").observe( send_attempt ) - # Wrap the session id in a JSON object - return 200, {"sid": sid} + return 200, ret class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -502,19 +534,25 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self._failure_email_template = ( self.config.email.email_add_threepid_template_failure_html ) async def on_GET(self, request: Request) -> None: - if not self.config.email.can_verify_email: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "Adding emails have been disabled due to lack of an email config" + ) raise SynapseError( 400, "Adding an email to your account is disabled on this server" ) + elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + raise SynapseError( + 400, + "This homeserver is not validating threepids.", + ) sid = parse_string(request, "sid", required=True) token = parse_string(request, "token", required=True) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index a8402cdb3a..b7ab090bbd 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -31,6 +31,7 @@ from synapse.api.errors import ( ) from synapse.api.ratelimiting import Ratelimiter from synapse.config import ConfigError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.server import is_threepid_reserved @@ -73,7 +74,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.config = hs.config - if self.hs.config.email.can_verify_email: + if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -82,10 +83,13 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.email.can_verify_email: - logger.warning( - "Email registration has been disabled due to lack of email config" - ) + if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if ( + self.hs.config.email.local_threepid_handling_disabled_due_to_email_config + ): + logger.warning( + "Email registration has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration has been disabled on this server" ) @@ -134,21 +138,35 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - # Send registration emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_registration_mail, - next_link, - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.registration.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = await 
self.identity_handler.request_email_token( + self.hs.config.registration.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send registration emails from Synapse, + # wrapping the session id in a JSON object. + ret = { + "sid": await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_registration_mail, + next_link, + ) + } threepid_send_requests.labels(type="email", reason="register").observe( send_attempt ) - # Wrap the session id in a JSON object - return 200, {"sid": sid} + return 200, ret class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -242,7 +260,7 @@ class RegistrationSubmitTokenServlet(RestServlet): self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self._failure_email_template = ( self.config.email.email_registration_template_failure_html ) @@ -252,10 +270,11 @@ class RegistrationSubmitTokenServlet(RestServlet): raise SynapseError( 400, "This medium is currently not supported for registration" ) - if not self.config.email.can_verify_email: - logger.warning( - "User registration via email has been disabled due to lack of email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "User registration via email has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration is disabled on this server" ) diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index b9402cfb75..6ac9dbc7c9 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Tuple from twisted.web.server import Request from synapse.api.errors import ThreepidValidationError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import DirectServeHtmlResource from synapse.http.servlet import parse_string from synapse.util.stringutils import assert_valid_client_secret @@ -45,6 +46,9 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): self.clock = hs.get_clock() self.store = hs.get_datastores().main + self._local_threepid_handling_disabled_due_to_email_config = ( + hs.config.email.local_threepid_handling_disabled_due_to_email_config + ) self._confirmation_email_template = ( hs.config.email.email_password_reset_template_confirmation_html ) @@ -55,8 +59,8 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): hs.config.email.email_password_reset_template_failure_html ) - # This resource should only be mounted if email validation is enabled - assert hs.config.email.can_verify_email + # This resource should not be mounted if threepid behaviour is not LOCAL + assert hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: sid = parse_string(request, "sid", required=True) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 071b488cc0..f8e64ce6ac 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -586,9 +586,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "require_at_registration": True, }, "account_threepid_delegates": { 
+ "email": "https://id_server", "msisdn": "https://id_server", }, - "email": {"notif_from": "Synapse "}, } ) def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: -- cgit 1.4.1 From 6b4fd8b43068c863b2861be997f8acb7568aff47 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 29 Jul 2022 12:23:13 +0100 Subject: 1.64.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/13406.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/13406.misc diff --git a/CHANGES.md b/CHANGES.md index da588454c7..2f46c51bae 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.64.0rc2 (2022-07-29) +============================== + +Internal Changes +---------------- + +- Warn instead of error when using `account_threepid_delegates.email`, which was deprecated in 1.64.0rc1. ([\#13406](https://github.com/matrix-org/synapse/issues/13406)) + + Synapse 1.64.0rc1 (2022-07-26) ============================== diff --git a/changelog.d/13406.misc b/changelog.d/13406.misc deleted file mode 100644 index f78e052e87..0000000000 --- a/changelog.d/13406.misc +++ /dev/null @@ -1 +0,0 @@ -Warn instead of error when using `account_threepid_delegates.email`, which was deprecated in 1.64.0rc1. diff --git a/debian/changelog b/debian/changelog index 38030a5e19..6a2dbdf322 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.64.0~rc2) stable; urgency=medium + + * New Synapse release 1.64.0rc2. + + -- Synapse Packaging team Fri, 29 Jul 2022 12:22:53 +0100 + matrix-synapse-py3 (1.64.0~rc1) stable; urgency=medium * New Synapse release 1.64.0rc1. diff --git a/pyproject.toml b/pyproject.toml index c707737590..4e8f7ae4f1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.64.0rc1" +version = "1.64.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 979d94de2984a9712373c3684ba69d2f911ca08a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 29 Jul 2022 12:27:23 +0100 Subject: update changelog --- CHANGES.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2f46c51bae..78c5d3038a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,16 +1,13 @@ Synapse 1.64.0rc2 (2022-07-29) ============================== -Internal Changes ----------------- - -- Warn instead of error when using `account_threepid_delegates.email`, which was deprecated in 1.64.0rc1. ([\#13406](https://github.com/matrix-org/synapse/issues/13406)) +This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in a future release. ([\#13406](https://github.com/matrix-org/synapse/issues/13406)) Synapse 1.64.0rc1 (2022-07-26) ============================== -As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://matrix-org.github.io/synapse/v1.64/upgrade.html#upgrading-to-v1640). +This RC removed the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server. 
We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu. -- cgit 1.4.1 From 23768ccb4d00ae6d4c01d30178ba223a4bbb10f2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 1 Aug 2022 11:20:05 +0100 Subject: Faster joins: fix rejected events becoming un-rejected during resync (#13413) Make sure that we re-check the auth rules during state resync, otherwise rejected events get un-rejected. --- changelog.d/13413.bugfix | 1 + synapse/handlers/federation_event.py | 29 ++++++++++++++++++++++++++--- synapse/storage/databases/main/state.py | 8 +++++--- 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13413.bugfix diff --git a/changelog.d/13413.bugfix b/changelog.d/13413.bugfix new file mode 100644 index 0000000000..a0ce884274 --- /dev/null +++ b/changelog.d/13413.bugfix @@ -0,0 +1 @@ +Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. \ No newline at end of file diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 2ba2b1527e..bcc755a376 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -581,6 +581,13 @@ class FederationEventHandler: event.event_id, ) return + + # since the state at this event has changed, we should now re-evaluate + # whether it should have been rejected. We must already have all of the + # auth events (from last time we went round this path), so there is no + # need to pass the origin. + await self._check_event_auth(None, event, context) + await self._store.update_state_for_partial_state_event(event, context) self._state_storage_controller.notify_event_un_partial_stated( event.event_id @@ -1624,13 +1631,15 @@ class FederationEventHandler: ) async def _check_event_auth( - self, origin: str, event: EventBase, context: EventContext + self, origin: Optional[str], event: EventBase, context: EventContext ) -> None: """ Checks whether an event should be rejected (for failing auth checks). Args: - origin: The host the event originates from. + origin: The host the event originates from. This is used to fetch + any missing auth events. It can be set to None, but only if we are + sure that we already have all the auth events. event: The event itself. context: The event context. @@ -1876,7 +1885,7 @@ class FederationEventHandler: event.internal_metadata.soft_failed = True async def _load_or_fetch_auth_events_for_event( - self, destination: str, event: EventBase + self, destination: Optional[str], event: EventBase ) -> Collection[EventBase]: """Fetch this event's auth_events, from database or remote @@ -1892,12 +1901,19 @@ class FederationEventHandler: Args: destination: where to send the /event_auth request. Typically the server that sent us `event` in the first place. + + If this is None, no attempt is made to load any missing auth events: + rather, an AssertionError is raised if there are any missing events. + event: the event whose auth_events we want Returns: all of the events listed in `event.auth_events_ids`, after deduplication Raises: + AssertionError if some auth events were missing and no `destination` was + supplied. + AuthError if we were unable to fetch the auth_events for any reason. 
""" event_auth_event_ids = set(event.auth_event_ids()) @@ -1909,6 +1925,13 @@ class FederationEventHandler: ) if not missing_auth_event_ids: return event_auth_events.values() + if destination is None: + # this shouldn't happen: destination must be set unless we know we have already + # persisted the auth events. + raise AssertionError( + "_load_or_fetch_auth_events_for_event() called with no destination for " + "an event with missing auth_events" + ) logger.info( "Event %s refers to unknown auth events %s: fetching auth chain", diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 9674c4a757..f70705a0af 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -419,13 +419,15 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # anything that was rejected should have the same state as its # predecessor. if context.rejected: - assert context.state_group == context.state_group_before_event + state_group = context.state_group_before_event + else: + state_group = context.state_group self.db_pool.simple_update_txn( txn, table="event_to_state_groups", keyvalues={"event_id": event.event_id}, - updatevalues={"state_group": context.state_group}, + updatevalues={"state_group": state_group}, ) self.db_pool.simple_delete_one_txn( @@ -440,7 +442,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): txn.call_after( self._get_state_group_for_event.prefill, (event.event_id,), - context.state_group, + state_group, ) -- cgit 1.4.1 From b817574be7fadf21e0224fe9bab4987230b59fcf Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 1 Aug 2022 10:51:44 +0000 Subject: Re-enable running Complement tests against Synapse with workers. (#13420) --- .github/workflows/tests.yml | 27 +++------------------------ changelog.d/13420.misc | 1 + 2 files changed, 4 insertions(+), 24 deletions(-) create mode 100644 changelog.d/13420.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c8b033e8a4..4bc29c8207 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -328,29 +328,8 @@ jobs: - arrangement: monolith database: Postgres - steps: - - name: Run actions/checkout@v2 for synapse - uses: actions/checkout@v2 - with: - path: synapse - - - name: Prepare Complement's Prerequisites - run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - - run: | - set -o pipefail - POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt - shell: bash - name: Run Complement Tests - - # XXX When complement with workers is stable, move this back into the standard - # "complement" matrix above. 
- # - # See https://github.com/matrix-org/synapse/issues/13161 - complement-workers: - if: "${{ !failure() && !cancelled() }}" - needs: linting-done - runs-on: ubuntu-latest + - arrangement: workers + database: Postgres steps: - name: Run actions/checkout@v2 for synapse @@ -363,7 +342,7 @@ jobs: - run: | set -o pipefail - POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt shell: bash name: Run Complement Tests diff --git a/changelog.d/13420.misc b/changelog.d/13420.misc new file mode 100644 index 0000000000..ff1a68e2e8 --- /dev/null +++ b/changelog.d/13420.misc @@ -0,0 +1 @@ +Re-enable running Complement tests against Synapse with workers. \ No newline at end of file -- cgit 1.4.1 From 05aeeb3a8024f01463f2a0089a88f5b28a2b89cc Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 1 Aug 2022 10:55:31 +0000 Subject: Enable Complement CI tests in the 'latest deps' test run. (#13213) Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- .github/workflows/latest_deps.yml | 37 ++++++++++++++++++++++++++++++++++--- changelog.d/13213.misc | 1 + docker/Dockerfile | 24 ++++++++++++++++++++++-- scripts-dev/complement.sh | 1 + 4 files changed, 58 insertions(+), 5 deletions(-) create mode 100644 changelog.d/13213.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index c537a5a60f..f263cf612d 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -135,11 +135,42 @@ jobs: /logs/**/*.log* - # TODO: run complement (as with twisted trunk, see #12473). + complement: + if: "${{ !failure() && !cancelled() }}" + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + include: + - arrangement: monolith + database: SQLite + + - arrangement: monolith + database: Postgres + + - arrangement: workers + database: Postgres + + steps: + - name: Run actions/checkout@v2 for synapse + uses: actions/checkout@v2 + with: + path: synapse + + - name: Prepare Complement's Prerequisites + run: synapse/.ci/scripts/setup_complement_prerequisites.sh + + - run: | + set -o pipefail + TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + shell: bash + name: Run Complement Tests - # open an issue if the build fails, so we know about it. + # Open an issue if the build fails, so we know about it. + # Only do this if we're not experimenting with this action in a PR. open-issue: - if: failure() + if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'" needs: # TODO: should mypy be included here? It feels more brittle than the other two. - mypy diff --git a/changelog.d/13213.misc b/changelog.d/13213.misc new file mode 100644 index 0000000000..b50d26ac0c --- /dev/null +++ b/changelog.d/13213.misc @@ -0,0 +1 @@ +Enable Complement CI tests in the 'latest deps' test run. \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index f4d8e6c925..97bb03b08f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -68,7 +68,18 @@ COPY pyproject.toml poetry.lock /synapse/ # reason, such as when a git repository is used directly as a dependency. 
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION -RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes} +# If specified, we won't use the Poetry lockfile. +# Instead, we'll just install what a regular `pip install` would from PyPI. +ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE + +# Export the dependencies, but only if we're actually going to use the Poetry lockfile. +# Otherwise, just create an empty requirements file so that the Dockerfile can +# proceed. +RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ + /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \ + else \ + touch /synapse/requirements.txt; \ + fi ### ### Stage 1: builder @@ -108,8 +119,17 @@ COPY synapse /synapse/synapse/ # ... and what we need to `pip install`. COPY pyproject.toml README.rst /synapse/ +# Repeat of earlier build argument declaration, as this is a new build stage. +ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE + # Install the synapse package itself. +# If we have populated requirements.txt, we don't install any dependencies +# as we should already have those from the previous `pip install` step. RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse +RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ + pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \ + else \ + pip install --prefix="/install" --no-warn-script-location /synapse[all]; \ + fi ### ### Stage 2: runtime diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 6381f7092e..eab23f18f1 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -101,6 +101,7 @@ if [ -z "$skip_docker_build" ]; then echo_if_github "::group::Build Docker image: matrixdotorg/synapse" docker build -t matrixdotorg/synapse \ --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ + --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ -f "docker/Dockerfile" . echo_if_github "::endgroup::" -- cgit 1.4.1 From 224d792dd7827fb53b9ed3b73a99f4acefb456ba Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 1 Aug 2022 13:53:56 +0100 Subject: Refactor `_resolve_state_at_missing_prevs` to return an `EventContext` (#13404) Previously, `_resolve_state_at_missing_prevs` returned the resolved state before an event and a partial state flag. These were unwieldy to carry around and would only ever be used to build an event context. Build the event context directly instead. Signed-off-by: Sean Quah --- changelog.d/13404.misc | 1 + synapse/handlers/federation_event.py | 126 ++++++++++++----------------------- synapse/state/__init__.py | 8 +++ synapse/storage/controllers/state.py | 4 ++ tests/handlers/test_federation.py | 15 +++-- 5 files changed, 68 insertions(+), 86 deletions(-) create mode 100644 changelog.d/13404.misc diff --git a/changelog.d/13404.misc b/changelog.d/13404.misc new file mode 100644 index 0000000000..655be4061b --- /dev/null +++ b/changelog.d/13404.misc @@ -0,0 +1 @@ +Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead.
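To make the shape of this refactor concrete before the diff below, here is a minimal sketch in plain Python. It uses simplified stand-in types rather than Synapse's real `EventContext` and `StateMap` classes, so treat it as an illustration of the pattern, not the actual implementation:

```python
from dataclasses import dataclass
from typing import Dict, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event type, state key) -> event id


@dataclass
class EventContext:
    # Stand-in for Synapse's real EventContext (illustrative only).
    state_ids_before_event: Optional[StateMap]
    partial_state: bool


# Before: callers got back a loosely-coupled pair, with the extra convention
# that (None, None) meant "all prev_events were present".
def resolve_state_old(missing_prevs: bool) -> Tuple[Optional[StateMap], Optional[bool]]:
    if not missing_prevs:
        return None, None
    resolved = {("m.room.create", ""): "$create-event-id"}
    return resolved, True


# After: the helper hands back the one object the pair was only ever used to
# build, so callers just check `context.partial_state`.
def resolve_state_new(missing_prevs: bool) -> EventContext:
    if not missing_prevs:
        return EventContext(state_ids_before_event=None, partial_state=False)
    resolved = {("m.room.create", ""): "$create-event-id"}
    return EventContext(state_ids_before_event=resolved, partial_state=True)


print(resolve_state_new(missing_prevs=True).partial_state)  # True
```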
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index bcc755a376..612e5aaa5b 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -23,7 +23,6 @@ from typing import ( Dict, Iterable, List, - Optional, Sequence, Set, Tuple, @@ -278,9 +277,8 @@ class FederationEventHandler: ) try: - await self._process_received_pdu( - origin, pdu, state_ids=None, partial_state=None - ) + context = await self._state_handler.compute_event_context(pdu) + await self._process_received_pdu(origin, pdu, context) except PartialStateConflictError: # The room was un-partial stated while we were processing the PDU. # Try once more, with full state this time. @@ -288,9 +286,8 @@ class FederationEventHandler: "Room %s was un-partial stated while processing the PDU, trying again.", room_id, ) - await self._process_received_pdu( - origin, pdu, state_ids=None, partial_state=None - ) + context = await self._state_handler.compute_event_context(pdu) + await self._process_received_pdu(origin, pdu, context) async def on_send_membership_event( self, origin: str, event: EventBase @@ -320,6 +317,7 @@ class FederationEventHandler: The event and context of the event after inserting it into the room graph. Raises: + RuntimeError if any prev_events are missing SynapseError if the event is not accepted into the room PartialStateConflictError if the room was un-partial stated in between computing the state at the event and persisting it. The caller should @@ -380,7 +378,7 @@ class FederationEventHandler: # need to. await self._event_creation_handler.cache_joined_hosts_for_event(event, context) - await self._check_for_soft_fail(event, None, origin=origin) + await self._check_for_soft_fail(event, context=context, origin=origin) await self._run_push_actions_and_persist_event(event, context) return event, context @@ -538,36 +536,10 @@ class FederationEventHandler: # # This is the same operation as we do when we receive a regular event # over federation. - state_ids, partial_state = await self._resolve_state_at_missing_prevs( + context = await self._compute_event_context_with_maybe_missing_prevs( destination, event ) - - # There are three possible cases for (state_ids, partial_state): - # * `state_ids` and `partial_state` are both `None` if we had all the - # prev_events. The prev_events may or may not have partial state and - # we won't know until we compute the event context. - # * `state_ids` is not `None` and `partial_state` is `False` if we were - # missing some prev_events (but we have full state for any we did - # have). We calculated the full state after the prev_events. - # * `state_ids` is not `None` and `partial_state` is `True` if we were - # missing some, but not all, prev_events. At least one of the - # prev_events we did have had partial state, so we calculated a partial - # state after the prev_events. - - context = None - if state_ids is not None and partial_state: - # the state after the prev events is still partial. We can't de-partial - # state the event, so don't bother building the event context. - pass - else: - # build a new state group for it if need be - context = await self._state_handler.compute_event_context( - event, - state_ids_before_event=state_ids, - partial_state=partial_state, - ) - - if context is None or context.partial_state: + if context.partial_state: # this can happen if some or all of the event's prev_events still have # partial state. 
We were careful to only pick events from the db without # partial-state prev events, so that implies that a prev event has @@ -840,26 +812,25 @@ class FederationEventHandler: try: try: - state_ids, partial_state = await self._resolve_state_at_missing_prevs( + context = await self._compute_event_context_with_maybe_missing_prevs( origin, event ) await self._process_received_pdu( origin, event, - state_ids=state_ids, - partial_state=partial_state, + context, backfilled=backfilled, ) except PartialStateConflictError: # The room was un-partial stated while we were processing the event. # Try once more, with full state this time. - state_ids, partial_state = await self._resolve_state_at_missing_prevs( + context = await self._compute_event_context_with_maybe_missing_prevs( origin, event ) # We ought to have full state now, barring some unlikely race where we left and # rejoned the room in the background. - if state_ids is not None and partial_state: + if context.partial_state: raise AssertionError( f"Event {event.event_id} still has a partial resolved state " f"after room {event.room_id} was un-partial stated" @@ -868,8 +839,7 @@ class FederationEventHandler: await self._process_received_pdu( origin, event, - state_ids=state_ids, - partial_state=partial_state, + context, backfilled=backfilled, ) except FederationError as e: @@ -878,15 +848,18 @@ class FederationEventHandler: else: raise - async def _resolve_state_at_missing_prevs( + async def _compute_event_context_with_maybe_missing_prevs( self, dest: str, event: EventBase - ) -> Tuple[Optional[StateMap[str]], Optional[bool]]: - """Calculate the state at an event with missing prev_events. + ) -> EventContext: + """Build an EventContext structure for a non-outlier event whose prev_events may + be missing. - This is used when we have pulled a batch of events from a remote server, and - still don't have all the prev_events. + This is used when we have pulled a batch of events from a remote server, and may + not have all the prev_events. - If we already have all the prev_events for `event`, this method does nothing. + To build an EventContext, we need to calculate the state before the event. If we + already have all the prev_events for `event`, we can simply use the state after + the prev_events to calculate the state before `event`. Otherwise, the missing prevs become new backwards extremities, and we fall back to asking the remote server for the state after each missing `prev_event`, @@ -907,10 +880,7 @@ class FederationEventHandler: event: an event to check for missing prevs. Returns: - if we already had all the prev events, `None, None`. Otherwise, returns a - tuple containing: - * the event ids of the state at `event`. - * a boolean indicating whether the state may be partial. + The event context. 
Raises: FederationError if we fail to get the state from the remote server after any @@ -924,7 +894,7 @@ class FederationEventHandler: missing_prevs = prevs - seen if not missing_prevs: - return None, None + return await self._state_handler.compute_event_context(event) logger.info( "Event %s is missing prev_events %s: calculating state for a " @@ -990,7 +960,9 @@ class FederationEventHandler: "We can't get valid state history.", affected=event_id, ) - return state_map, partial_state + return await self._state_handler.compute_event_context( + event, state_ids_before_event=state_map, partial_state=partial_state + ) async def _get_state_ids_after_missing_prev_event( self, @@ -1159,8 +1131,7 @@ class FederationEventHandler: self, origin: str, event: EventBase, - state_ids: Optional[StateMap[str]], - partial_state: Optional[bool], + context: EventContext, backfilled: bool = False, ) -> None: """Called when we have a new non-outlier event. @@ -1182,32 +1153,18 @@ class FederationEventHandler: event: event to be persisted - state_ids: Normally None, but if we are handling a gap in the graph - (ie, we are missing one or more prev_events), the resolved state at the - event - - partial_state: - `True` if `state_ids` is partial and omits non-critical membership - events. - `False` if `state_ids` is the full state. - `None` if `state_ids` is not provided. In this case, the flag will be - calculated based on `event`'s prev events. + context: The `EventContext` to persist the event with. backfilled: True if this is part of a historical batch of events (inhibits notification to clients, and validation of device keys.) PartialStateConflictError: if the room was un-partial stated in between - computing the state at the event and persisting it. The caller should retry - exactly once in this case. + computing the state at the event and persisting it. The caller should + recompute `context` and retry exactly once when this happens. """ logger.debug("Processing event: %s", event) assert not event.internal_metadata.outlier - context = await self._state_handler.compute_event_context( - event, - state_ids_before_event=state_ids, - partial_state=partial_state, - ) try: await self._check_event_auth(origin, event, context) except AuthError as e: @@ -1219,7 +1176,7 @@ class FederationEventHandler: # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. - await self._check_for_soft_fail(event, state_ids, origin=origin) + await self._check_for_soft_fail(event, context=context, origin=origin) await self._run_push_actions_and_persist_event(event, context, backfilled) @@ -1782,7 +1739,7 @@ class FederationEventHandler: async def _check_for_soft_fail( self, event: EventBase, - state_ids: Optional[StateMap[str]], + context: EventContext, origin: str, ) -> None: """Checks if we should soft fail the event; if so, marks the event as @@ -1793,7 +1750,7 @@ class FederationEventHandler: Args: event - state_ids: The state at the event if we don't have all the event's prev events + context: The `EventContext` which we are about to persist the event with. origin: The host the event originates from. """ if await self._store.is_partial_state_room(event.room_id): @@ -1819,11 +1776,15 @@ class FederationEventHandler: auth_types = auth_types_for_event(room_version_obj, event) # Calculate the "current state". 
- if state_ids is not None: - # If we're explicitly given the state then we won't have all the - # prev events, and so we have a gap in the graph. In this case - # we want to be a little careful as we might have been down for - # a while and have an incorrect view of the current state, + seen_event_ids = await self._store.have_events_in_timeline(prev_event_ids) + has_missing_prevs = bool(prev_event_ids - seen_event_ids) + if has_missing_prevs: + # We don't have all the prev_events of this event, which means we have a + # gap in the graph, and the new event is going to become a new backwards + # extremity. + # + # In this case we want to be a little careful as we might have been + # down for a while and have an incorrect view of the current state, # however we still want to do checks as gaps are easy to # maliciously manufacture. # @@ -1836,6 +1797,7 @@ class FederationEventHandler: event.room_id, extrem_ids ) state_sets: List[StateMap[str]] = list(state_sets_d.values()) + state_ids = await context.get_prev_state_ids() state_sets.append(state_ids) current_state_ids = ( await self._state_resolution_handler.resolve_events_with_store( diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 69834de0de..c355e4f98a 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -278,6 +278,10 @@ class StateHandler: flag will be calculated based on `event`'s prev events. Returns: The event context. + + Raises: + RuntimeError if `state_ids_before_event` is not provided and one or more + prev events are missing or outliers. """ assert not event.internal_metadata.is_outlier() @@ -432,6 +436,10 @@ class StateHandler: Returns: The resolved state + + Raises: + RuntimeError if we don't have a state group for one or more of the events + (ie. they are outliers or unknown) """ logger.debug("resolve_state_groups event_ids %s", event_ids) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 20805c94fa..1e35046e07 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -338,6 +338,10 @@ class StateStorageController: event_ids: events to get state groups for await_full_state: if true, will block if we do not yet have complete state at these events. + + Raises: + RuntimeError if we don't have a state group for one or more of the events + (ie. they are outliers or unknown) """ if await_full_state: await self._partial_state_events_tracker.await_full_state(event_ids) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index fb06e5e812..aea96a0986 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -280,16 +280,23 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): # we poke this directly into _process_received_pdu, to avoid the # federation handler wanting to backfill the fake event. - self.get_success( - federation_event_handler._process_received_pdu( - self.OTHER_SERVER_NAME, + state_handler = self.hs.get_state_handler() + context = self.get_success( + state_handler.compute_event_context( event, - state_ids={ + state_ids_before_event={ (e.type, e.state_key): e.event_id for e in current_state }, partial_state=False, ) ) + self.get_success( + federation_event_handler._process_received_pdu( + self.OTHER_SERVER_NAME, + event, + context, + ) + ) # we should now have 8 backwards extremities. 
backwards_extremities = self.get_success( -- cgit 1.4.1 From f8e7a9418a755b888ed254896bada5e45ebc0a04 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 1 Aug 2022 10:14:29 -0400 Subject: Fix missing import in `federation_event` handler. (#13431) #13404 removed an import of `Optional` which was still needed due to #13413 adding more usages. --- changelog.d/13431.misc | 1 + synapse/handlers/federation_event.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/13431.misc diff --git a/changelog.d/13431.misc b/changelog.d/13431.misc new file mode 100644 index 0000000000..655be4061b --- /dev/null +++ b/changelog.d/13431.misc @@ -0,0 +1 @@ +Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 612e5aaa5b..91d1439191 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -23,6 +23,7 @@ from typing import ( Dict, Iterable, List, + Optional, Sequence, Set, Tuple, -- cgit 1.4.1 From e17e5c97e0d2be7a82c782e8ef9774e682968c7b Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 1 Aug 2022 16:45:39 +0000 Subject: Faster Room Joins: don't leave a stuck room partial state flag if the join fails. (#13403) --- changelog.d/13403.misc | 1 + synapse/handlers/federation.py | 32 +++++----- tests/handlers/test_federation.py | 122 +++++++++++++++++++++++++++++++++++++- 3 files changed, 140 insertions(+), 15 deletions(-) create mode 100644 changelog.d/13403.misc diff --git a/changelog.d/13403.misc b/changelog.d/13403.misc new file mode 100644 index 0000000000..cb7b38153c --- /dev/null +++ b/changelog.d/13403.misc @@ -0,0 +1 @@ +Faster Room Joins: don't leave a stuck room partial state flag if the join fails. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3b5eaf5156..1cf6cb32e3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -546,9 +546,9 @@ class FederationHandler: ) if ret.partial_state: - # TODO(faster_joins): roll this back if we don't manage to start the - # background resync (eg process_remote_join fails) - # https://github.com/matrix-org/synapse/issues/12998 + # Mark the room as having partial state. + # The background process is responsible for unmarking this flag, + # even if the join fails. await self.store.store_partial_state_room(room_id, ret.servers_in_room) try: @@ -574,17 +574,21 @@ class FederationHandler: room_id, ) raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0) - - if ret.partial_state: - # Kick off the process of asynchronously fetching the state for this - # room. - run_as_background_process( - desc="sync_partial_state_room", - func=self._sync_partial_state_room, - initial_destination=origin, - other_destinations=ret.servers_in_room, - room_id=room_id, - ) + finally: + # Always kick off the background process that asynchronously fetches + # state for the room. + # If the join failed, the background process is responsible for + # cleaning up — including unmarking the room as a partial state room. + if ret.partial_state: + # Kick off the process of asynchronously fetching the state for this + # room.
+ run_as_background_process( + desc="sync_partial_state_room", + func=self._sync_partial_state_room, + initial_destination=origin, + other_destinations=ret.servers_in_room, + room_id=room_id, + ) # We wait here until this instance has seen the events come down # replication (if we're using replication) as the below uses caches. diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index aea96a0986..745750b1d7 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -14,6 +14,7 @@ import logging from typing import cast from unittest import TestCase +from unittest.mock import Mock, patch from twisted.test.proto_helpers import MemoryReactor @@ -22,6 +23,7 @@ from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseErro from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.federation.federation_base import event_from_pdu_json +from synapse.federation.federation_client import SendJoinResult from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client import login, room @@ -30,7 +32,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest -from tests.test_utils import event_injection +from tests.test_utils import event_injection, make_awaitable logger = logging.getLogger(__name__) @@ -456,3 +458,121 @@ class EventFromPduTestCase(TestCase): }, RoomVersions.V6, ) + + +class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): + def test_failed_partial_join_is_clean(self) -> None: + """ + Tests that, when failing to partial-join a room, we don't get stuck with + a partial-state flag on a room. 
+ """ + + fed_handler = self.hs.get_federation_handler() + fed_client = fed_handler.federation_client + + room_id = "!room:example.com" + membership_event = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@alice:test", + "state_key": "@alice:test", + "content": {"membership": "join"}, + }, + RoomVersions.V10, + ) + + mock_make_membership_event = Mock( + return_value=make_awaitable( + ( + "example.com", + membership_event, + RoomVersions.V10, + ) + ) + ) + + EVENT_CREATE = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.create", + "sender": "@kristina:example.com", + "state_key": "", + "depth": 0, + "content": {"creator": "@kristina:example.com", "room_version": "10"}, + "auth_events": [], + "origin_server_ts": 1, + }, + room_version=RoomVersions.V10, + ) + EVENT_CREATOR_MEMBERSHIP = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@kristina:example.com", + "state_key": "@kristina:example.com", + "content": {"membership": "join"}, + "depth": 1, + "prev_events": [EVENT_CREATE.event_id], + "auth_events": [EVENT_CREATE.event_id], + "origin_server_ts": 1, + }, + room_version=RoomVersions.V10, + ) + EVENT_INVITATION_MEMBERSHIP = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@kristina:example.com", + "state_key": "@alice:test", + "content": {"membership": "invite"}, + "depth": 2, + "prev_events": [EVENT_CREATOR_MEMBERSHIP.event_id], + "auth_events": [ + EVENT_CREATE.event_id, + EVENT_CREATOR_MEMBERSHIP.event_id, + ], + "origin_server_ts": 1, + }, + room_version=RoomVersions.V10, + ) + mock_send_join = Mock( + return_value=make_awaitable( + SendJoinResult( + membership_event, + "example.com", + state=[ + EVENT_CREATE, + EVENT_CREATOR_MEMBERSHIP, + EVENT_INVITATION_MEMBERSHIP, + ], + auth_chain=[ + EVENT_CREATE, + EVENT_CREATOR_MEMBERSHIP, + EVENT_INVITATION_MEMBERSHIP, + ], + partial_state=True, + servers_in_room=["example.com"], + ) + ) + ) + + with patch.object( + fed_client, "make_membership_event", mock_make_membership_event + ), patch.object(fed_client, "send_join", mock_send_join): + # Join and check that our join event is rejected + # (The join event is rejected because it doesn't have any signatures) + join_exc = self.get_failure( + fed_handler.do_invite_join(["example.com"], room_id, "@alice:test", {}), + SynapseError, + ) + self.assertIn("Join event was rejected", str(join_exc)) + + store = self.hs.get_datastores().main + + # Check that we don't have a left-over partial_state entry. + self.assertFalse( + self.get_success(store.is_partial_state_room(room_id)), + f"Stale partial-stated room flag left over for {room_id} after a" + f" failed do_invite_join!", + ) -- cgit 1.4.1 From 4e80ca224308e0b382c4a951873ebd69f80db001 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 2 Aug 2022 11:04:08 +0100 Subject: 1.64.0 --- CHANGES.md | 15 +++++++++++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 78c5d3038a..5e1da96bcf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.64.0 (2022-08-02) +=========================== + +No significant changes since 1.64.0rc2. + + +Deprecation Warning +------------------- + +Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server. 
+ +If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf. +[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email) + + Synapse 1.64.0rc2 (2022-07-29) ============================== diff --git a/debian/changelog b/debian/changelog index 6a2dbdf322..9efcb4f132 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.64.0) stable; urgency=medium + + * New Synapse release 1.64.0. + + -- Synapse Packaging team Tue, 02 Aug 2022 10:32:30 +0100 + matrix-synapse-py3 (1.64.0~rc2) stable; urgency=medium * New Synapse release 1.64.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 4e8f7ae4f1..af7def0c53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.64.0rc2" +version = "1.64.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From cb209638ea8f5ad1ea069e850d295fdadf198e68 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 2 Aug 2022 11:10:26 +0100 Subject: Add upgrade notes --- docs/upgrade.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/upgrade.md b/docs/upgrade.md index 73ed209975..47a74b67de 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -91,6 +91,16 @@ process, for example: # Upgrading to v1.64.0 +## Deprecation of the ability to delegate e-mail verification to identity servers + +Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server. + +If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf. +[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email) + +The option that will be removed is `account_threepid_delegates.email`. + + ## Changes to the event replication streams Synapse now includes a flag indicating if an event is an outlier when -- cgit 1.4.1 From c2f48712265de92bd833a15f86935bed1f1efbe5 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 2 Aug 2022 11:19:32 +0100 Subject: Mention specific version in rc2 notes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 5e1da96bcf..0e69f25e0e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -16,7 +16,7 @@ If you require your homeserver to verify e-mail addresses or to support password Synapse 1.64.0rc2 (2022-07-29) ============================== -This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in a future release. ([\#13406](https://github.com/matrix-org/synapse/issues/13406)) +This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse v1.66.0. 
([\#13406](https://github.com/matrix-org/synapse/issues/13406)) Synapse 1.64.0rc1 (2022-07-26) -- cgit 1.4.1 From 8d317f6da5aa2efc030990c7036cdb4f384c704a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 2 Aug 2022 12:12:44 +0100 Subject: Fix error when out of servers to sync partial state with (#13432) so that we raise the intended error instead. Signed-off-by: Sean Quah --- changelog.d/13432.bugfix | 1 + synapse/handlers/federation.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13432.bugfix diff --git a/changelog.d/13432.bugfix b/changelog.d/13432.bugfix new file mode 100644 index 0000000000..bb99616afc --- /dev/null +++ b/changelog.d/13432.bugfix @@ -0,0 +1 @@ +Faster room joins: Fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1cf6cb32e3..57ad6e5dce 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1543,15 +1543,16 @@ class FederationHandler: # Make an infinite iterator of destinations to try. Once we find a working # destination, we'll stick with it until it flakes. + destinations: Collection[str] if initial_destination is not None: # Move `initial_destination` to the front of the list. destinations = list(other_destinations) if initial_destination in destinations: destinations.remove(initial_destination) destinations = [initial_destination] + destinations - destination_iter = itertools.cycle(destinations) else: - destination_iter = itertools.cycle(other_destinations) + destinations = other_destinations + destination_iter = itertools.cycle(destinations) # `destination` is the current remote homeserver we're pulling from. destination = next(destination_iter) -- cgit 1.4.1 From 1c910e2216a2f7fa57971ffbdede509fd493ebb0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 2 Aug 2022 15:56:28 +0000 Subject: Add a `merge-back` command to the release script, which automates merging the correct branches after a release. (#13393) --- changelog.d/13393.misc | 1 + scripts-dev/release.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 changelog.d/13393.misc diff --git a/changelog.d/13393.misc b/changelog.d/13393.misc new file mode 100644 index 0000000000..be2b0153ea --- /dev/null +++ b/changelog.d/13393.misc @@ -0,0 +1 @@ +Add a `merge-back` command to the release script, which automates merging the correct branches after a release. \ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 5bfd750118..46220c4dd3 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -32,6 +32,7 @@ import click import commonmark import git from click.exceptions import ClickException +from git import GitCommandError, Repo from github import Github from packaging import version @@ -78,6 +79,8 @@ def cli() -> None: # Optional: generate some nice links for the announcement + ./scripts-dev/release.py merge-back + ./scripts-dev/release.py announce If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the @@ -441,6 +444,79 @@ def upload() -> None: ) +def _merge_into(repo: Repo, source: str, target: str) -> None: + """ + Merges branch `source` into branch `target`. + Pulls both before merging and pushes the result. 
+ """ + + # Update our branches and switch to the target branch + for branch in [source, target]: + click.echo(f"Switching to {branch} and pulling...") + repo.heads[branch].checkout() + # Pull so we're up to date + repo.remote().pull() + + assert repo.active_branch.name == target + + try: + # TODO This seemed easier than using GitPython directly + click.echo(f"Merging {source}...") + repo.git.merge(source) + except GitCommandError as exc: + # If a merge conflict occurs, give some context and try to + # make it easy to abort if necessary. + click.echo(exc) + if not click.confirm( + f"Likely merge conflict whilst merging ({source} → {target}). " + f"Have you resolved it?" + ): + repo.git.merge("--abort") + return + + # Push result. + click.echo("Pushing...") + repo.remote().push() + + +@cli.command() +def merge_back() -> None: + """Merge the release branch back into the appropriate branches. + All branches will be automatically pulled from the remote and the results + will be pushed to the remote.""" + + synapse_repo = get_repo_and_check_clean_checkout() + branch_name = synapse_repo.active_branch.name + + if not branch_name.startswith("release-v"): + raise RuntimeError("Not on a release branch. This does not seem sensible.") + + # Pull so we're up to date + synapse_repo.remote().pull() + + current_version = get_package_version() + + if current_version.is_prerelease: + # Release candidate + if click.confirm(f"Merge {branch_name} → develop?", default=True): + _merge_into(synapse_repo, branch_name, "develop") + else: + # Full release + sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest") + + if click.confirm(f"Merge {branch_name} → master?", default=True): + _merge_into(synapse_repo, branch_name, "master") + + if click.confirm("Merge master → develop?", default=True): + _merge_into(synapse_repo, "master", "develop") + + if click.confirm(f"On SyTest, merge {branch_name} → master?", default=True): + _merge_into(sytest_repo, branch_name, "master") + + if click.confirm("On SyTest, merge master → develop?", default=True): + _merge_into(sytest_repo, "master", "develop") + + @cli.command() def announce() -> None: """Generate markdown to announce the release.""" -- cgit 1.4.1 From ec6758d47253fd6f0cee567f38c8996e35a4de7a Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 3 Aug 2022 10:41:57 +0200 Subject: Fix wrong headline for `url_preview_accept_language` in docs (#13437) Fixes: #13433 --- changelog.d/13437.doc | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13437.doc diff --git a/changelog.d/13437.doc b/changelog.d/13437.doc new file mode 100644 index 0000000000..fb772b24dc --- /dev/null +++ b/changelog.d/13437.doc @@ -0,0 +1 @@ +Fix wrong headline for `url_preview_accept_language` in documentation. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 6f8d7b7d26..d072240abf 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1859,7 +1859,7 @@ Example configuration: max_spider_size: 8M ``` --- -### `url_preview_language` +### `url_preview_accept_language` A list of values for the Accept-Language HTTP header used when downloading webpages during URL preview generation. 
This allows -- cgit 1.4.1 From 5eccfdfafdf957739697d667c05fde829cc0f8bc Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 3 Aug 2022 11:19:20 +0200 Subject: Remove 'Contents' section from the Configuration Manual (#13438) Fixes: #13053 --- changelog.d/13438.doc | 1 + docs/usage/configuration/config_documentation.md | 43 ------------------------ 2 files changed, 1 insertion(+), 43 deletions(-) create mode 100644 changelog.d/13438.doc diff --git a/changelog.d/13438.doc b/changelog.d/13438.doc new file mode 100644 index 0000000000..163b63ffc6 --- /dev/null +++ b/changelog.d/13438.doc @@ -0,0 +1 @@ +Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index d072240abf..2e2e59195b 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -72,49 +72,6 @@ apply if you want your config file to be read properly. A few helpful things to In addition, each setting has an example of its usage, with the proper indentation shown. -## Contents -[Modules](#modules) - -[Server](#server) - -[Homeserver Blocking](#homeserver-blocking) - -[TLS](#tls) - -[Federation](#federation) - -[Caching](#caching) - -[Database](#database) - -[Logging](#logging) - -[Ratelimiting](#ratelimiting) - -[Media Store](#media-store) - -[Captcha](#captcha) - -[TURN](#turn) - -[Registration](#registration) - -[API Configuration](#api-configuration) - -[Signing Keys](#signing-keys) - -[Single Sign On Integration](#single-sign-on-integration) - -[Push](#push) - -[Rooms](#rooms) - -[Opentracing](#opentracing) - -[Workers](#workers) - -[Background Updates](#background-updates) - ## Modules Server admins can expand Synapse's functionality with external modules. -- cgit 1.4.1 From 570bf32bbb9b4bd31a9783746187769613591499 Mon Sep 17 00:00:00 2001 From: Matt C <96466754+buffless-matt@users.noreply.github.com> Date: Wed, 3 Aug 2022 19:25:36 +1000 Subject: Add module API method to resolve a room alias to a room ID (#13428) Co-authored-by: MattC Co-authored-by: Brendan Abolivier --- changelog.d/13428.feature | 1 + synapse/module_api/__init__.py | 24 ++++++++++++++++++++++++ tests/module_api/test_api.py | 19 +++++++++++++++++++ 3 files changed, 44 insertions(+) create mode 100644 changelog.d/13428.feature diff --git a/changelog.d/13428.feature b/changelog.d/13428.feature new file mode 100644 index 0000000000..085b61483f --- /dev/null +++ b/changelog.d/13428.feature @@ -0,0 +1 @@ +Add a module API method to translate a room alias into a room ID. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 6d8bf54083..18d6d1058a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1452,6 +1452,30 @@ class ModuleApi: start_timestamp, end_timestamp ) + async def lookup_room_alias(self, room_alias: str) -> Tuple[str, List[str]]: + """ + Get the room ID associated with a room alias. + + Added in Synapse v1.65.0. + + Args: + room_alias: The alias to look up. + + Returns: + A tuple of: + The room ID (str). + Hosts likely to be participating in the room ([str]). + + Raises: + SynapseError if room alias is invalid or could not be found. 
+ """ + alias = RoomAlias.from_string(room_alias) + (room_id, hosts) = await self._hs.get_room_member_handler().lookup_room_alias( + alias + ) + + return room_id.to_string(), hosts + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 169e29b590..8e05590230 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -635,6 +635,25 @@ class ModuleApiTestCase(HomeserverTestCase): [{"set_tweak": "sound", "value": "default"}] ) + def test_lookup_room_alias(self) -> None: + """Test that modules can resolve a room alias to a room ID.""" + password = "password" + user_id = self.register_user("user", password) + access_token = self.login(user_id, password) + room_alias = "my-alias" + reference_room_id = self.helper.create_room_as( + tok=access_token, extra_content={"room_alias_name": room_alias} + ) + self.assertIsNotNone(reference_room_id) + + (room_id, _) = self.get_success( + self.module_api.lookup_room_alias( + f"#{room_alias}:{self.module_api.server_name}" + ) + ) + + self.assertEqual(room_id, reference_room_id) + class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" -- cgit 1.4.1 From d6e94ad9d9b22efe0f5eb64d946c10a542068f7c Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 3 Aug 2022 11:40:20 +0200 Subject: Rename `RateLimitConfig` to `RatelimitSettings` (#13442) --- changelog.d/13442.misc | 1 + synapse/api/ratelimiting.py | 6 +++--- synapse/config/ratelimiting.py | 42 ++++++++++++++++++++--------------------- synapse/rest/client/register.py | 4 ++-- synapse/util/ratelimitutils.py | 6 +++--- 5 files changed, 30 insertions(+), 29 deletions(-) create mode 100644 changelog.d/13442.misc diff --git a/changelog.d/13442.misc b/changelog.d/13442.misc new file mode 100644 index 0000000000..f503bc79d3 --- /dev/null +++ b/changelog.d/13442.misc @@ -0,0 +1 @@ +Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. 
\ No newline at end of file diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index f43965c1c8..044c7d4926 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -17,7 +17,7 @@ from collections import OrderedDict from typing import Hashable, Optional, Tuple from synapse.api.errors import LimitExceededError -from synapse.config.ratelimiting import RateLimitConfig +from synapse.config.ratelimiting import RatelimitSettings from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util import Clock @@ -314,8 +314,8 @@ class RequestRatelimiter: self, store: DataStore, clock: Clock, - rc_message: RateLimitConfig, - rc_admin_redaction: Optional[RateLimitConfig], + rc_message: RatelimitSettings, + rc_admin_redaction: Optional[RatelimitSettings], ): self.store = store self.clock = clock diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 5a91917b4a..1ed001e105 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -21,7 +21,7 @@ from synapse.types import JsonDict from ._base import Config -class RateLimitConfig: +class RatelimitSettings: def __init__( self, config: Dict[str, float], @@ -34,7 +34,7 @@ class RateLimitConfig: @attr.s(auto_attribs=True) -class FederationRateLimitConfig: +class FederationRatelimitSettings: window_size: int = 1000 sleep_limit: int = 10 sleep_delay: int = 500 @@ -50,11 +50,11 @@ class RatelimitConfig(Config): # Load the new-style messages config if it exists. Otherwise fall back # to the old method. if "rc_message" in config: - self.rc_message = RateLimitConfig( + self.rc_message = RatelimitSettings( config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0} ) else: - self.rc_message = RateLimitConfig( + self.rc_message = RatelimitSettings( { "per_second": config.get("rc_messages_per_second", 0.2), "burst_count": config.get("rc_message_burst_count", 10.0), @@ -64,9 +64,9 @@ class RatelimitConfig(Config): # Load the new-style federation config, if it exists. Otherwise, fall # back to the old method. 
if "rc_federation" in config: - self.rc_federation = FederationRateLimitConfig(**config["rc_federation"]) + self.rc_federation = FederationRatelimitSettings(**config["rc_federation"]) else: - self.rc_federation = FederationRateLimitConfig( + self.rc_federation = FederationRatelimitSettings( **{ k: v for k, v in { @@ -80,17 +80,17 @@ class RatelimitConfig(Config): } ) - self.rc_registration = RateLimitConfig(config.get("rc_registration", {})) + self.rc_registration = RatelimitSettings(config.get("rc_registration", {})) - self.rc_registration_token_validity = RateLimitConfig( + self.rc_registration_token_validity = RatelimitSettings( config.get("rc_registration_token_validity", {}), defaults={"per_second": 0.1, "burst_count": 5}, ) rc_login_config = config.get("rc_login", {}) - self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {})) - self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {})) - self.rc_login_failed_attempts = RateLimitConfig( + self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {})) + self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {})) + self.rc_login_failed_attempts = RatelimitSettings( rc_login_config.get("failed_attempts", {}) ) @@ -101,20 +101,20 @@ class RatelimitConfig(Config): rc_admin_redaction = config.get("rc_admin_redaction") self.rc_admin_redaction = None if rc_admin_redaction: - self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction) + self.rc_admin_redaction = RatelimitSettings(rc_admin_redaction) - self.rc_joins_local = RateLimitConfig( + self.rc_joins_local = RatelimitSettings( config.get("rc_joins", {}).get("local", {}), defaults={"per_second": 0.1, "burst_count": 10}, ) - self.rc_joins_remote = RateLimitConfig( + self.rc_joins_remote = RatelimitSettings( config.get("rc_joins", {}).get("remote", {}), defaults={"per_second": 0.01, "burst_count": 10}, ) # Track the rate of joins to a given room. If there are too many, temporarily # prevent local joins and remote joins via this server. - self.rc_joins_per_room = RateLimitConfig( + self.rc_joins_per_room = RatelimitSettings( config.get("rc_joins_per_room", {}), defaults={"per_second": 1, "burst_count": 10}, ) @@ -124,31 +124,31 @@ class RatelimitConfig(Config): # * For requests received over federation this is keyed by the origin. # # Note that this isn't exposed in the configuration as it is obscure. 
- self.rc_key_requests = RateLimitConfig( + self.rc_key_requests = RatelimitSettings( config.get("rc_key_requests", {}), defaults={"per_second": 20, "burst_count": 100}, ) - self.rc_3pid_validation = RateLimitConfig( + self.rc_3pid_validation = RatelimitSettings( config.get("rc_3pid_validation") or {}, defaults={"per_second": 0.003, "burst_count": 5}, ) - self.rc_invites_per_room = RateLimitConfig( + self.rc_invites_per_room = RatelimitSettings( config.get("rc_invites", {}).get("per_room", {}), defaults={"per_second": 0.3, "burst_count": 10}, ) - self.rc_invites_per_user = RateLimitConfig( + self.rc_invites_per_user = RatelimitSettings( config.get("rc_invites", {}).get("per_user", {}), defaults={"per_second": 0.003, "burst_count": 5}, ) - self.rc_invites_per_issuer = RateLimitConfig( + self.rc_invites_per_issuer = RatelimitSettings( config.get("rc_invites", {}).get("per_issuer", {}), defaults={"per_second": 0.3, "burst_count": 10}, ) - self.rc_third_party_invite = RateLimitConfig( + self.rc_third_party_invite = RatelimitSettings( config.get("rc_third_party_invite", {}), defaults={ "per_second": self.rc_message.per_second, diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index b7ab090bbd..956c45e60a 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -33,7 +33,7 @@ from synapse.api.ratelimiting import Ratelimiter from synapse.config import ConfigError from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig -from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.config.ratelimiting import FederationRatelimitSettings from synapse.config.server import is_threepid_reserved from synapse.handlers.auth import AuthHandler from synapse.handlers.ui_auth import UIAuthSessionDataConstants @@ -325,7 +325,7 @@ class UsernameAvailabilityRestServlet(RestServlet): self.registration_handler = hs.get_registration_handler() self.ratelimiter = FederationRateLimiter( hs.get_clock(), - FederationRateLimitConfig( + FederationRatelimitSettings( # Time window of 2s window_size=2000, # Artificially delay requests if rate > sleep_limit/window_size diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index dfe628c97e..6394cc39ac 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -21,7 +21,7 @@ from typing import Any, DefaultDict, Iterator, List, Set from twisted.internet import defer from synapse.api.errors import LimitExceededError -from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.config.ratelimiting import FederationRatelimitSettings from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, @@ -36,7 +36,7 @@ logger = logging.getLogger(__name__) class FederationRateLimiter: - def __init__(self, clock: Clock, config: FederationRateLimitConfig): + def __init__(self, clock: Clock, config: FederationRatelimitSettings): def new_limiter() -> "_PerHostRatelimiter": return _PerHostRatelimiter(clock=clock, config=config) @@ -63,7 +63,7 @@ class FederationRateLimiter: class _PerHostRatelimiter: - def __init__(self, clock: Clock, config: FederationRateLimitConfig): + def __init__(self, clock: Clock, config: FederationRatelimitSettings): """ Args: clock -- cgit 1.4.1 From fb7a2cc4cc7f0b49cabeec08d4ceb2dd2350e945 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 3 Aug 2022 11:41:19 +0200 Subject: Update doc for setting 
`macaroon_secret_key` (#13443) * Update doc for setting `macaroon_secret_key` * newsfile --- changelog.d/13443.doc | 1 + docs/usage/configuration/config_documentation.md | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13443.doc diff --git a/changelog.d/13443.doc b/changelog.d/13443.doc new file mode 100644 index 0000000000..0db5d1b3b4 --- /dev/null +++ b/changelog.d/13443.doc @@ -0,0 +1 @@ +Update documentation for config setting `macaroon_secret_key`. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 2e2e59195b..3a9466a837 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2495,9 +2495,13 @@ track_appservice_user_ips: true --- ### `macaroon_secret_key` -A secret which is used to sign access tokens. If none is specified, -the `registration_shared_secret` is used, if one is given; otherwise, -a secret key is derived from the signing key. +A secret which is used to sign +- access token for guest users, +- short-term login token used during SSO logins (OIDC or SAML2) and +- token used for unsubscribing from email notifications. + +If none is specified, the `registration_shared_secret` is used, if one is given; +otherwise, a secret key is derived from the signing key. Example configuration: ```yaml -- cgit 1.4.1 From 668597214f0cfeb9d7a642ef0564e73198b7bc82 Mon Sep 17 00:00:00 2001 From: jejo86 <28619134+jejo86@users.noreply.github.com> Date: Wed, 3 Aug 2022 12:15:23 +0200 Subject: Improve documentation on becoming server admin (#13230) * Improved section regarding server admin Added steps describing how to elevate an existing user to administrator by manipulating a `postgres` database. Signed-off-by: jejo86 28619134+jejo86@users.noreply.github.com * Improved section regarding server admin * Reference database settings Add instructions to check database settings to find out the database name, instead of listing all available PostgreSQL databases. * Add suggestions from PR conversation Replace config filename `homeserver.yaml`. with "config file". Remove instructions to switch to `postgres` user. Add instructions how to connect to SQLite database. * Update changelog.d/13230.doc Co-authored-by: reivilibre --- changelog.d/13230.doc | 1 + docs/usage/administration/admin_api/README.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13230.doc diff --git a/changelog.d/13230.doc b/changelog.d/13230.doc new file mode 100644 index 0000000000..dce7be2425 --- /dev/null +++ b/changelog.d/13230.doc @@ -0,0 +1 @@ +Add steps describing how to elevate an existing user to administrator by manipulating the database. diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index c60b6da0de..f11e0b19a6 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -5,8 +5,9 @@ Many of the API calls in the admin api will require an `access_token` for a server admin. (Note that a server admin is distinct from a room admin.) -A user can be marked as a server admin by updating the database directly, e.g.: +An existing user can be marked as a server admin by updating the database directly. 
+Check your [database settings](config_documentation.md#database) in the configuration file, connect to the correct database using either `psql [database name]` (if using PostgreSQL) or `sqlite3 path/to/your/database.db` (if using SQLite) and elevate the user `@foo:bar.com` to administrator. ```sql UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` -- cgit 1.4.1 From 503a95804e61be692abb2c872e03284efc541afb Mon Sep 17 00:00:00 2001 From: Jasper Spaans Date: Wed, 3 Aug 2022 12:16:32 +0200 Subject: Install cryptography build dependencies in requirements image. (#13372) --- changelog.d/13372.docker | 1 + docker/Dockerfile | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13372.docker diff --git a/changelog.d/13372.docker b/changelog.d/13372.docker new file mode 100644 index 0000000000..238c78de09 --- /dev/null +++ b/changelog.d/13372.docker @@ -0,0 +1 @@ +Make docker images build on armv7 by installing cryptography dependencies in the "requirements" stage. Contributed by Jasper Spaans. \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 97bb03b08f..fa58ae3acb 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -40,7 +40,8 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as requirements RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && apt-get install -yqq git \ + apt-get update -qq && apt-get install -yqq \ + build-essential cargo git libffi-dev libssl-dev \ && rm -rf /var/lib/apt/lists/* # We install poetry in its own build stage to avoid its dependencies conflicting with -- cgit 1.4.1 From 78a3111c41bf93fd52774965af50d62b74d937de Mon Sep 17 00:00:00 2001 From: andrew do Date: Wed, 3 Aug 2022 05:26:31 -0700 Subject: Return 404 or member list when getting joined_members after leaving (#13374) Signed-off-by: Andrew Doh Co-authored-by: Patrick Cloke Co-authored-by: Andrew Morgan Co-authored-by: Brendan Abolivier --- changelog.d/13374.bugfix | 1 + synapse/handlers/message.py | 6 ++++-- tests/rest/admin/test_room.py | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13374.bugfix diff --git a/changelog.d/13374.bugfix b/changelog.d/13374.bugfix new file mode 100644 index 0000000000..1c5bd1b363 --- /dev/null +++ b/changelog.d/13374.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. 
\ No newline at end of file diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e85b540451..ee0773988e 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -324,8 +324,10 @@ class MessageHandler: room_id, user_id, allow_departed_users=True ) if membership != Membership.JOIN: - raise NotImplementedError( - "Getting joined members after leaving is not implemented" + raise SynapseError( + code=403, + errcode=Codes.FORBIDDEN, + msg="Getting joined members while not being a current member of the room is forbidden.", ) users_with_profile = await self.store.get_users_in_room_with_profiles(room_id) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 623883b53c..989cbdb5e2 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1772,6 +1772,21 @@ class RoomTestCase(unittest.HomeserverTestCase): tok=admin_user_tok, ) + def test_get_joined_members_after_leave_room(self) -> None: + """Test that requesting room members after leaving the room raises a 403 error.""" + + # create a room and then leave it + user = self.register_user("foo", "pass") + user_tok = self.login("foo", "pass") + room_id = self.helper.create_room_as(user, tok=user_tok) + self.helper.leave(room_id, user, tok=user_tok) + + # request the joined members after having left the room + url = f"/_matrix/client/r0/rooms/{room_id}/joined_members" + channel = self.make_request("GET", url.encode("ascii"), access_token=user_tok) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + class JoinAliasRoomTestCase(unittest.HomeserverTestCase): -- cgit 1.4.1 From 92d21faf12c982a8d27ad465eb94f2fed0e8b32f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 3 Aug 2022 10:57:38 -0500 Subject: Instrument `/messages` for understandable traces in Jaeger (#13368) In Jaeger: - Before: huge list of uncategorized database calls - After: nice and collapsible into units of work --- changelog.d/13368.misc | 1 + synapse/api/auth.py | 8 +++++++- synapse/federation/federation_client.py | 2 ++ synapse/handlers/federation.py | 2 ++ synapse/handlers/federation_event.py | 5 +++++ synapse/handlers/pagination.py | 2 ++ synapse/handlers/relations.py | 2 ++ synapse/storage/controllers/state.py | 5 +++++ synapse/storage/databases/main/stream.py | 2 ++ synapse/streams/events.py | 2 ++ synapse/visibility.py | 2 ++ 11 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13368.misc diff --git a/changelog.d/13368.misc b/changelog.d/13368.misc new file mode 100644 index 0000000000..4b433a5107 --- /dev/null +++ b/changelog.d/13368.misc @@ -0,0 +1 @@ +Instrument `/messages` for understandable traces in Jaeger.
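For readers skimming the diffs that follow: the `trace` decorator being added throughout this commit conceptually just runs each call inside a named opentracing span. A minimal sketch of the idea, simplified from the real decorator (which also handles plain functions and the tracing-disabled case):

```python
import functools
from typing import Any, Awaitable, Callable, TypeVar

from synapse.logging.opentracing import start_active_span

R = TypeVar("R")


def trace(func: Callable[..., Awaitable[R]]) -> Callable[..., Awaitable[R]]:
    """Sketch: run each call to `func` inside a span named after it."""

    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> R:
        # The span covers the whole awaited call, so the work shows up in
        # Jaeger as one collapsible unit instead of a flat list of queries.
        with start_active_span(func.__name__):
            return await func(*args, **kwargs)

    return wrapper
```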
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 82e6475ef5..523bad0c55 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -31,7 +31,12 @@ from synapse.api.errors import ( from synapse.appservice import ApplicationService from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import active_span, force_tracing, start_active_span +from synapse.logging.opentracing import ( + active_span, + force_tracing, + start_active_span, + trace, +) from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import Requester, UserID, create_requester @@ -567,6 +572,7 @@ class Auth: return query_params[0].decode("ascii") + @trace async def check_user_in_room_or_world_readable( self, room_id: str, user_id: str, allow_departed_users: bool = False ) -> Tuple[str, Optional[str]]: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 6a8d76529b..54ffbd8170 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -61,6 +61,7 @@ from synapse.federation.federation_base import ( ) from synapse.federation.transport.client import SendJoinResponse from synapse.http.types import QueryParams +from synapse.logging.opentracing import trace from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -233,6 +234,7 @@ class FederationClient(FederationBase): destination, content, timeout ) + @trace async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] ) -> Optional[List[EventBase]]: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57ad6e5dce..30f1585a85 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -59,6 +59,7 @@ from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context +from synapse.logging.opentracing import trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( @@ -180,6 +181,7 @@ class FederationHandler: "resume_sync_partial_state_room", self._resume_sync_partial_state_room ) + @trace async def maybe_backfill( self, room_id: str, current_depth: int, limit: int ) -> bool: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 91d1439191..8968b705d4 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -59,6 +59,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_client import InvalidResponseError from synapse.logging.context import nested_logging_context +from synapse.logging.opentracing import trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( @@ -566,6 +567,7 @@ class FederationEventHandler: event.event_id ) + @trace async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] ) -> None: @@ -610,6 +612,7 @@ class 
FederationEventHandler: backfilled=True, ) + @trace async def _get_missing_events_for_pdu( self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int ) -> None: @@ -710,6 +713,7 @@ class FederationEventHandler: logger.info("Got %d prev_events", len(missing_events)) await self._process_pulled_events(origin, missing_events, backfilled=False) + @trace async def _process_pulled_events( self, origin: str, events: Iterable[EventBase], backfilled: bool ) -> None: @@ -748,6 +752,7 @@ class FederationEventHandler: with nested_logging_context(ev.event_id): await self._process_pulled_event(origin, ev, backfilled=backfilled) + @trace async def _process_pulled_event( self, origin: str, event: EventBase, backfilled: bool ) -> None: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 6262a35822..e1e34e3b16 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -24,6 +24,7 @@ from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import SerializeEventConfig from synapse.handlers.room import ShutdownRoomResponse +from synapse.logging.opentracing import trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig @@ -416,6 +417,7 @@ class PaginationHandler: await self._storage_controllers.purge_events.purge_room(room_id) + @trace async def get_messages( self, requester: Requester, diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 8f797e3ae9..72d25df8c8 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -19,6 +19,7 @@ import attr from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase, relation_from_event +from synapse.logging.opentracing import trace from synapse.storage.databases.main.relations import _RelatedEvent from synapse.types import JsonDict, Requester, StreamToken, UserID from synapse.visibility import filter_events_for_client @@ -361,6 +362,7 @@ class RelationsHandler: return results + @trace async def get_bundled_aggregations( self, events: Iterable[EventBase], user_id: str ) -> Dict[str, BundledAggregations]: diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 1e35046e07..0d480f1014 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -29,6 +29,7 @@ from typing import ( from synapse.api.constants import EventTypes from synapse.events import EventBase +from synapse.logging.opentracing import trace from synapse.storage.state import StateFilter from synapse.storage.util.partial_state_events_tracker import ( PartialCurrentStateTracker, @@ -179,6 +180,7 @@ class StateStorageController: return self.stores.state._get_state_groups_from_groups(groups, state_filter) + @trace async def get_state_for_events( self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: @@ -225,6 +227,7 @@ class StateStorageController: return {event: event_to_state[event] for event in event_ids} + @trace async def get_state_ids_for_events( self, event_ids: Collection[str], @@ -287,6 +290,7 @@ class StateStorageController: ) return state_map[event_id] + @trace async def get_state_ids_for_event( self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: @@ -327,6 +331,7 @@ class 
StateStorageController: groups, state_filter or StateFilter.all() ) + @trace async def get_state_group_for_events( self, event_ids: Collection[str], diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 2590b52f73..a347430aa7 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -58,6 +58,7 @@ from twisted.internet import defer from synapse.api.filtering import Filter from synapse.events import EventBase from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -1346,6 +1347,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return rows, next_token + @trace async def paginate_room_events( self, room_id: str, diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 54e0b1a23b..bcd840bd88 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -21,6 +21,7 @@ from synapse.handlers.presence import PresenceEventSource from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.room import RoomEventSource from synapse.handlers.typing import TypingNotificationEventSource +from synapse.logging.opentracing import trace from synapse.streams import EventSource from synapse.types import StreamToken @@ -69,6 +70,7 @@ class EventSources: ) return token + @trace async def get_current_token_for_pagination(self, room_id: str) -> StreamToken: """Get the current token for a given room to be used to paginate events. diff --git a/synapse/visibility.py b/synapse/visibility.py index 9abbaa5a64..d947edde66 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.events.utils import prune_event +from synapse.logging.opentracing import trace from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore from synapse.storage.state import StateFilter @@ -51,6 +52,7 @@ MEMBERSHIP_PRIORITY = ( _HISTORY_VIS_KEY: Final[Tuple[str, str]] = (EventTypes.RoomHistoryVisibility, "") +@trace async def filter_events_for_client( storage: StorageControllers, user_id: str, -- cgit 1.4.1 From a648a06d52715d0d4ad1ec72d042df1b3fd1be71 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 3 Aug 2022 10:19:34 -0700 Subject: Add some tracing spans to give insight into local joins (#13439) --- changelog.d/13439.misc | 1 + synapse/handlers/message.py | 15 ++++++----- synapse/handlers/room_member.py | 57 ++++++++++++++++++++++------------------- 3 files changed, 40 insertions(+), 33 deletions(-) create mode 100644 changelog.d/13439.misc diff --git a/changelog.d/13439.misc b/changelog.d/13439.misc new file mode 100644 index 0000000000..4aa73d7075 --- /dev/null +++ b/changelog.d/13439.misc @@ -0,0 +1 @@ +Add some tracing to give more insight into local room joins. 
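The diffs below use the same tracing machinery as a context manager rather than a decorator: wrapping individual awaited steps gives each expensive operation its own named span, instead of a single span for the whole handler. A hedged sketch of the pattern (the stub is illustrative, not a Synapse API):

```python
from synapse.logging import opentracing


async def evaluate_push_rules(event: object, context: object) -> None:
    """Illustrative stub standing in for the real push-rule evaluation."""


async def persist_event(event: object, context: object) -> None:
    # Only this step runs inside the named span, so its cost appears
    # separately from the rest of the persist path in Jaeger.
    with opentracing.start_active_span("calculate_push_actions"):
        await evaluate_push_rules(event, context)
```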
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ee0773988e..6b03603598 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -52,6 +52,7 @@ from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.handlers.directory import DirectoryHandler +from synapse.logging import opentracing from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet @@ -1374,9 +1375,10 @@ class EventCreationHandler: # and `state_groups` because they have `prev_events` that aren't persisted yet # (historical messages persisted in reverse-chronological order). if not event.internal_metadata.is_historical(): - await self._bulk_push_rule_evaluator.action_for_event_by_user( - event, context - ) + with opentracing.start_active_span("calculate_push_actions"): + await self._bulk_push_rule_evaluator.action_for_event_by_user( + event, context + ) try: # If we're a worker we need to hit out to the master. @@ -1463,9 +1465,10 @@ class EventCreationHandler: state = await state_entry.get_state( self._storage_controllers.state, StateFilter.all() ) - joined_hosts = await self.store.get_joined_hosts( - event.room_id, state, state_entry - ) + with opentracing.start_active_span("get_joined_hosts"): + joined_hosts = await self.store.get_joined_hosts( + event.room_id, state, state_entry + ) # Note that the expiry times must be larger than the expiry time in # _external_cache_joined_hosts_updates. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 520c52e013..70dc69c809 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -32,6 +32,7 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.logging import opentracing from synapse.module_api import NOT_SPAM from synapse.storage.state import StateFilter from synapse.types import ( @@ -428,14 +429,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): await self._join_rate_per_room_limiter.ratelimit( requester, key=room_id, update=False ) - - result_event = await self.event_creation_handler.handle_new_client_event( - requester, - event, - context, - extra_users=[target], - ratelimit=ratelimit, - ) + with opentracing.start_active_span("handle_new_client_event"): + result_event = await self.event_creation_handler.handle_new_client_event( + requester, + event, + context, + extra_users=[target], + ratelimit=ratelimit, + ) if event.membership == Membership.LEAVE: if prev_member_event_id: @@ -564,25 +565,26 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # by application services), and then by room ID. 
async with self.member_as_limiter.queue(as_id): async with self.member_linearizer.queue(key): - result = await self.update_membership_locked( - requester, - target, - room_id, - action, - txn_id=txn_id, - remote_room_hosts=remote_room_hosts, - third_party_signed=third_party_signed, - ratelimit=ratelimit, - content=content, - new_room=new_room, - require_consent=require_consent, - outlier=outlier, - historical=historical, - allow_no_prev_events=allow_no_prev_events, - prev_event_ids=prev_event_ids, - state_event_ids=state_event_ids, - depth=depth, - ) + with opentracing.start_active_span("update_membership_locked"): + result = await self.update_membership_locked( + requester, + target, + room_id, + action, + txn_id=txn_id, + remote_room_hosts=remote_room_hosts, + third_party_signed=third_party_signed, + ratelimit=ratelimit, + content=content, + new_room=new_room, + require_consent=require_consent, + outlier=outlier, + historical=historical, + allow_no_prev_events=allow_no_prev_events, + prev_event_ids=prev_event_ids, + state_event_ids=state_event_ids, + depth=depth, + ) return result @@ -649,6 +651,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): Returns: A tuple of the new event ID and stream ID. """ + content_specified = bool(content) if content is None: content = {} -- cgit 1.4.1 From 845732be450b3f9c991df35b2f07d600a0eca6dd Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 4 Aug 2022 11:02:29 +0200 Subject: Fix rooms not being properly excluded from incremental sync (#13408) --- changelog.d/13408.bugfix | 1 + synapse/handlers/sync.py | 25 +++++++++++++++---------- tests/rest/client/test_sync.py | 21 +++++++++++++++++++++ 3 files changed, 37 insertions(+), 10 deletions(-) create mode 100644 changelog.d/13408.bugfix diff --git a/changelog.d/13408.bugfix b/changelog.d/13408.bugfix new file mode 100644 index 0000000000..8b87b2cf7b --- /dev/null +++ b/changelog.d/13408.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index d42a414c90..d827c03ad1 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1536,15 +1536,13 @@ class SyncHandler: ignored_users = await self.store.ignored_users(user_id) if since_token: room_changes = await self._get_rooms_changed( - sync_result_builder, ignored_users, self.rooms_to_exclude + sync_result_builder, ignored_users ) tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) else: - room_changes = await self._get_all_rooms( - sync_result_builder, ignored_users, self.rooms_to_exclude - ) + room_changes = await self._get_all_rooms(sync_result_builder, ignored_users) tags_by_room = await self.store.get_tags_for_user(user_id) log_kv({"rooms_changed": len(room_changes.room_entries)}) @@ -1623,13 +1621,14 @@ class SyncHandler: self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str], - excluded_rooms: List[str], ) -> _RoomChanges: """Determine the changes in rooms to report to the user. This function is a first pass at generating the rooms part of the sync response. It determines which rooms have changed during the sync period, and categorises - them into four buckets: "knock", "invite", "join" and "leave". + them into four buckets: "knock", "invite", "join" and "leave". 
It also excludes + from that list any room that appears in the list of rooms to exclude from sync + results in the server configuration. 1. Finds all membership changes for the user in the sync period (from `since_token` up to `now_token`). @@ -1655,7 +1654,7 @@ class SyncHandler: # _have_rooms_changed. We could keep the results in memory to avoid a # second query, at the cost of more complicated source code. membership_change_events = await self.store.get_membership_changes_for_user( - user_id, since_token.room_key, now_token.room_key, excluded_rooms + user_id, since_token.room_key, now_token.room_key, self.rooms_to_exclude ) mem_change_events_by_room_id: Dict[str, List[EventBase]] = {} @@ -1862,7 +1861,6 @@ class SyncHandler: self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str], - ignored_rooms: List[str], ) -> _RoomChanges: """Returns entries for all rooms for the user. @@ -1884,7 +1882,7 @@ class SyncHandler: room_list = await self.store.get_rooms_for_local_user_where_membership_is( user_id=user_id, membership_list=Membership.LIST, - excluded_rooms=ignored_rooms, + excluded_rooms=self.rooms_to_exclude, ) room_entries = [] @@ -2150,7 +2148,9 @@ class SyncHandler: raise Exception("Unrecognized rtype: %r", room_builder.rtype) async def get_rooms_for_user_at( - self, user_id: str, room_key: RoomStreamToken + self, + user_id: str, + room_key: RoomStreamToken, ) -> FrozenSet[str]: """Get set of joined rooms for a user at the given stream ordering. @@ -2176,7 +2176,12 @@ class SyncHandler: # If the membership's stream ordering is after the given stream # ordering, we need to go and work out if the user was in the room # before. + # We also need to check whether the room should be excluded from sync + # responses as per the homeserver config. for joined_room in joined_rooms: + if joined_room.room_id in self.rooms_to_exclude: + continue + if not joined_room.event_pos.persisted_after(room_key): joined_room_ids.add(joined_room.room_id) continue diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index b085c50356..ae16184828 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -948,3 +948,24 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase): self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["invite"]) self.assertIn(self.included_room_id, channel.json_body["rooms"]["invite"]) + + def test_incremental_sync(self) -> None: + """Tests that activity in the room is properly filtered out of incremental + syncs. 
+ """ + channel = self.make_request("GET", "/sync", access_token=self.tok) + self.assertEqual(channel.code, 200, channel.result) + next_batch = channel.json_body["next_batch"] + + self.helper.send(self.excluded_room_id, tok=self.tok) + self.helper.send(self.included_room_id, tok=self.tok) + + channel = self.make_request( + "GET", + f"/sync?since={next_batch}", + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"]) + self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"]) -- cgit 1.4.1 From a91078200dbf41f6929762381e02cdeb21ff07d1 Mon Sep 17 00:00:00 2001 From: Matt C <96466754+buffless-matt@users.noreply.github.com> Date: Thu, 4 Aug 2022 19:34:05 +1000 Subject: Add module API method to create a room (#13429) Co-authored-by: MattC Co-authored-by: Brendan Abolivier --- changelog.d/13429.feature | 1 + synapse/module_api/__init__.py | 51 ++++++++++++++++++++++++++++++++++++++++++ tests/module_api/test_api.py | 51 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 103 insertions(+) create mode 100644 changelog.d/13429.feature diff --git a/changelog.d/13429.feature b/changelog.d/13429.feature new file mode 100644 index 0000000000..f4f347e54e --- /dev/null +++ b/changelog.d/13429.feature @@ -0,0 +1 @@ +Add a module API method to create a room. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 18d6d1058a..71145870ee 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1476,6 +1476,57 @@ class ModuleApi: return room_id.to_string(), hosts + async def create_room( + self, + user_id: str, + config: JsonDict, + ratelimit: bool = True, + creator_join_profile: Optional[JsonDict] = None, + ) -> Tuple[str, Optional[str]]: + """Creates a new room. + + Added in Synapse v1.65.0. + + Args: + user_id: + The user who requested the room creation. + config : A dict of configuration options. See "Request body" of: + https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom + ratelimit: set to False to disable the rate limiter for this specific operation. + + creator_join_profile: + Set to override the displayname and avatar for the creating + user in this room. If unset, displayname and avatar will be + derived from the user's profile. If set, should contain the + values to go in the body of the 'join' event (typically + `avatar_url` and/or `displayname`. + + Returns: + A tuple containing: 1) the room ID (str), 2) if an alias was requested, + the room alias (str), otherwise None if no alias was requested. + + Raises: + ResourceLimitError if server is blocked to some resource being + exceeded. + RuntimeError if the user_id does not refer to a local user. + SynapseError if the user_id is invalid, room ID couldn't be stored, or + something went horribly wrong. 
+ """ + if not self.is_mine(user_id): + raise RuntimeError( + "Tried to create a room as a user that isn't local to this homeserver", + ) + + requester = create_requester(user_id) + room_id_and_alias, _ = await self._hs.get_room_creation_handler().create_room( + requester=requester, + config=config, + ratelimit=ratelimit, + creator_join_profile=creator_join_profile, + ) + + return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 8e05590230..9bf95472e1 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -654,6 +654,57 @@ class ModuleApiTestCase(HomeserverTestCase): self.assertEqual(room_id, reference_room_id) + def test_create_room(self) -> None: + """Test that modules can create a room.""" + # First test user validation (i.e. user is local). + self.get_failure( + self.module_api.create_room( + user_id=f"@user:{self.module_api.server_name}abc", + config={}, + ratelimit=False, + ), + RuntimeError, + ) + + # Now do the happy path. + user_id = self.register_user("user", "password") + access_token = self.login(user_id, "password") + + room_id, room_alias = self.get_success( + self.module_api.create_room( + user_id=user_id, config={"room_alias_name": "foo-bar"}, ratelimit=False + ) + ) + + # Check room creator. + channel = self.make_request( + "GET", + f"/_matrix/client/v3/rooms/{room_id}/state/m.room.create", + access_token=access_token, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["creator"], user_id) + + # Check room alias. + self.assertEquals(room_alias, f"#foo-bar:{self.module_api.server_name}") + + # Let's try a room with no alias. + room_id, room_alias = self.get_success( + self.module_api.create_room(user_id=user_id, config={}, ratelimit=False) + ) + + # Check room creator. + channel = self.make_request( + "GET", + f"/_matrix/client/v3/rooms/{room_id}/state/m.room.create", + access_token=access_token, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["creator"], user_id) + + # Check room alias. + self.assertIsNone(room_alias) + class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" -- cgit 1.4.1 From 166fafdf8da0d3482ce62cc8c2b1cd140aa419c0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 4 Aug 2022 12:59:27 +0100 Subject: synapse-workers docker: copy nginx and redis in from base images (#13447) Part of my continuing quest to make the docker images build quicker: copy nginx and redis in from base docker images, rather than apt installing each time. --- changelog.d/13447.misc | 1 + docker/Dockerfile-workers | 93 ++++++++++++++++++++------------- docker/conf-workers/supervisord.conf.j2 | 2 +- 3 files changed, 60 insertions(+), 36 deletions(-) create mode 100644 changelog.d/13447.misc diff --git a/changelog.d/13447.misc b/changelog.d/13447.misc new file mode 100644 index 0000000000..ede3ee91b8 --- /dev/null +++ b/changelog.d/13447.misc @@ -0,0 +1 @@ +Improve rebuild speed for the "synapse-workers" docker image. 
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 84f836ff7b..003a1cc3bf 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -1,39 +1,62 @@ # syntax=docker/dockerfile:1 -# Inherit from the official Synapse docker image -ARG SYNAPSE_VERSION=latest -FROM matrixdotorg/synapse:$SYNAPSE_VERSION - -# Install deps -RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && \ - DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \ - redis-server nginx-light - -# Install supervisord with pip instead of apt, to avoid installing a second -# copy of python. -RUN --mount=type=cache,target=/root/.cache/pip \ - pip install supervisor~=4.2 - -# Disable the default nginx sites -RUN rm /etc/nginx/sites-enabled/default -# Copy Synapse worker, nginx and supervisord configuration template files -COPY ./docker/conf-workers/* /conf/ - -# Copy a script to prefix log lines with the supervisor program name -COPY ./docker/prefix-log /usr/local/bin/ - -# Expose nginx listener port -EXPOSE 8080/tcp +ARG SYNAPSE_VERSION=latest -# A script to read environment variables and create the necessary -# files to run the desired worker configuration. Will start supervisord. -COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py -ENTRYPOINT ["/configure_workers_and_start.py"] +# first of all, we create a base image with an nginx which we can copy into the +# target image. For repeated rebuilds, this is much faster than apt installing +# each time. + +FROM debian:bullseye-slim AS deps_base + RUN \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update -qq && \ + DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \ + redis-server nginx-light + +# Similarly, a base to copy the redis server from. +# +# The redis docker image has fewer dynamic libraries than the debian package, +# which makes it much easier to copy (but we need to make sure we use an image +# based on the same debian version as the synapse image, to make sure we get +# the expected version of libc). +FROM redis:6-bullseye AS redis_base + +# now build the final image, based on the regular Synapse docker image FROM matrixdotorg/synapse:$SYNAPSE_VERSION -# Replace the healthcheck with one which checks *all* the workers. The script -# is generated by configure_workers_and_start.py. -HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ - CMD /bin/sh /healthcheck.sh + # Install supervisord with pip instead of apt, to avoid installing a second + # copy of python.
+ RUN --mount=type=cache,target=/root/.cache/pip \ + pip install supervisor~=4.2 + RUN mkdir -p /etc/supervisor/conf.d + + # Copy over redis and nginx + COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin + + COPY --from=deps_base /usr/sbin/nginx /usr/sbin + COPY --from=deps_base /usr/share/nginx /usr/share/nginx + COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx + COPY --from=deps_base /etc/nginx /etc/nginx + RUN rm /etc/nginx/sites-enabled/default + RUN mkdir /var/log/nginx /var/lib/nginx + RUN chown www-data /var/log/nginx /var/lib/nginx + + # Copy Synapse worker, nginx and supervisord configuration template files + COPY ./docker/conf-workers/* /conf/ + + # Copy a script to prefix log lines with the supervisor program name + COPY ./docker/prefix-log /usr/local/bin/ + + # Expose nginx listener port + EXPOSE 8080/tcp + + # A script to read environment variables and create the necessary + # files to run the desired worker configuration. Will start supervisord. + COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py + ENTRYPOINT ["/configure_workers_and_start.py"] + + # Replace the healthcheck with one which checks *all* the workers. The script + # is generated by configure_workers_and_start.py. + HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ + CMD /bin/sh /healthcheck.sh diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index 086137494e..9f1e03cfc0 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -19,7 +19,7 @@ username=www-data autorestart=true [program:redis] -command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no +command=/usr/local/bin/prefix-log /usr/local/bin/redis-server priority=1 stdout_logfile=/dev/stdout stdout_logfile_maxbytes=0 -- cgit 1.4.1 From afbdbe063454e6748ba15b73866f1571f1e6ffd8 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 4 Aug 2022 14:03:36 +0200 Subject: Fix return value in example on `password_auth_provider_callbacks.md` (#13450) Fixes: #12534 Signed-off-by: Dirk Klimpel --- changelog.d/13450.doc | 1 + docs/modules/password_auth_provider_callbacks.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13450.doc diff --git a/changelog.d/13450.doc b/changelog.d/13450.doc new file mode 100644 index 0000000000..e272baff0a --- /dev/null +++ b/changelog.d/13450.doc @@ -0,0 +1 @@ +Fix example code in module documentation of `password_auth_provider_callbacks`. 
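A note on the corrected documentation in the diff that follows: the fix is that an auth-checker callback must resolve to either `None` (the checker could not authenticate the user) or a two-tuple of the fully qualified user ID and an optional post-login callback, not a bare user ID. A hedged sketch of the expected shape (`self.api` and `self.credentials` are the module's own attributes, as in the documented examples):

```python
from typing import Any, Awaitable, Callable, Optional, Tuple


async def check_my_login(
    self: Any, username: str, login_type: str, login_dict: dict
) -> Optional[Tuple[str, Optional[Callable[..., Awaitable[None]]]]]:
    if self.credentials.get(username) != login_dict.get("my_field"):
        # None tells Synapse this checker did not authenticate the user.
        return None
    # The second element is an optional callback run after a successful
    # login; None means no callback is needed.
    return (self.api.get_qualified_user_id(username), None)
```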
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index ec810fd292..f6349d5404 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -263,7 +263,7 @@ class MyAuthProvider: return None if self.credentials.get(username) == login_dict.get("my_field"): - return self.api.get_qualified_user_id(username) + return (self.api.get_qualified_user_id(username), None) async def check_pass( self, @@ -280,5 +280,5 @@ class MyAuthProvider: return None if self.credentials.get(username) == login_dict.get("password"): - return self.api.get_qualified_user_id(username) + return (self.api.get_qualified_user_id(username), None) ``` -- cgit 1.4.1 From 6dd7fa12dc18835b18c0632b89c2563d806cd8ef Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 4 Aug 2022 14:06:02 +0200 Subject: Update some outdated information on `sso_mapping_providers.md` (#13449) --- changelog.d/13449.doc | 1 + docs/sso_mapping_providers.md | 14 ++++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13449.doc diff --git a/changelog.d/13449.doc b/changelog.d/13449.doc new file mode 100644 index 0000000000..cbe4f62b6e --- /dev/null +++ b/changelog.d/13449.doc @@ -0,0 +1 @@ +Update outdated information on `sso_mapping_providers` documentation. diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index 7b4ddc5b74..817499149f 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -22,7 +22,7 @@ choose their own username. In the first case - where users are automatically allocated a Matrix ID - it is the responsibility of the mapping provider to normalise the SSO attributes and map them to a valid Matrix ID. The [specification for Matrix -IDs](https://matrix.org/docs/spec/appendices#user-identifiers) has some +IDs](https://spec.matrix.org/latest/appendices/#user-identifiers) has some information about what is considered valid. If the mapping provider does not assign a Matrix ID, then Synapse will @@ -37,9 +37,10 @@ as Synapse). The Synapse config is then modified to point to the mapping provide ## OpenID Mapping Providers The OpenID mapping provider can be customized by editing the -`oidc_config.user_mapping_provider.module` config option. +[`oidc_providers.user_mapping_provider.module`](usage/configuration/config_documentation.md#oidc_providers) +config option. -`oidc_config.user_mapping_provider.config` allows you to provide custom +`oidc_providers.user_mapping_provider.config` allows you to provide custom configuration options to the module. Check with the module's documentation for what options it provides (if any). The options listed by default are for the user mapping provider built in to Synapse. If using a custom module, you should @@ -58,7 +59,7 @@ A custom mapping provider must specify the following methods: - This method should have the `@staticmethod` decoration. - Arguments: - `config` - A `dict` representing the parsed content of the - `oidc_config.user_mapping_provider.config` homeserver config option. + `oidc_providers.user_mapping_provider.config` homeserver config option. Runs on homeserver startup. Providers should extract and validate any option values they need here. 
- Whatever is returned will be passed back to the user mapping provider module's @@ -102,7 +103,7 @@ A custom mapping provider must specify the following methods: will be returned as part of the response during a successful login. Note that care should be taken to not overwrite any of the parameters - usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login). + usually returned as part of the [login response](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login). ### Default OpenID Mapping Provider @@ -113,7 +114,8 @@ specified in the config. It is located at ## SAML Mapping Providers The SAML mapping provider can be customized by editing the -`saml2_config.user_mapping_provider.module` config option. +[`saml2_config.user_mapping_provider.module`](usage/configuration/config_documentation.md#saml2_config) +config option. `saml2_config.user_mapping_provider.config` allows you to provide custom configuration options to the module. Check with the module's documentation for -- cgit 1.4.1 From 41320a0554716aaf7cec6172da98e002c48344c5 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Thu, 4 Aug 2022 15:49:55 +0100 Subject: Optimise async get event lookups (#13435) Still maintains the local in-memory lookup optimisation, but performs any external lookup as part of the deferred, which prevents duplicate lookups for the same event at once. This makes the assumption that fetching from an external cache is a non-zero load operation. --- changelog.d/13435.misc | 1 + synapse/storage/databases/main/events_worker.py | 75 ++++++++++++++++++++++--- synapse/storage/databases/main/roommember.py | 2 +- synapse/util/caches/lrucache.py | 17 ++++++ 4 files changed, 87 insertions(+), 8 deletions(-) create mode 100644 changelog.d/13435.misc diff --git a/changelog.d/13435.misc b/changelog.d/13435.misc new file mode 100644 index 0000000000..c01b9136c8 --- /dev/null +++ b/changelog.d/13435.misc @@ -0,0 +1 @@ +Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 29c99c6357..e9ff6cfb34 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -600,7 +600,11 @@ class EventsWorkerStore(SQLBaseStore): Returns: map from event id to result """ - event_entry_map = await self._get_events_from_cache( + # Shortcut: check if we have any events in the *in memory* cache - this function + # may be called repeatedly for the same event so at this point we cannot reach + # out to any external cache for performance reasons. The external cache is + # checked later on in the `get_missing_events_from_cache_or_db` function below. + event_entry_map = self._get_events_from_local_cache( event_ids, ) @@ -632,7 +636,9 @@ class EventsWorkerStore(SQLBaseStore): if missing_events_ids: - async def get_missing_events_from_db() -> Dict[str, EventCacheEntry]: + async def get_missing_events_from_cache_or_db() -> Dict[ + str, EventCacheEntry + ]: """Fetches the events in `missing_event_ids` from the database. Also creates entries in `self._current_event_fetches` to allow @@ -657,10 +663,18 @@ class EventsWorkerStore(SQLBaseStore): # the events have been redacted, and if so pulling the redaction event # out of the database to check it.
# + missing_events = {} try: - missing_events = await self._get_events_from_db( + # Try to fetch from any external cache. We already checked the + # in-memory cache above. + missing_events = await self._get_events_from_external_cache( missing_events_ids, ) + # Now actually fetch any remaining events from the DB + db_missing_events = await self._get_events_from_db( + missing_events_ids - missing_events.keys(), + ) + missing_events.update(db_missing_events) except Exception as e: with PreserveLoggingContext(): fetching_deferred.errback(e) @@ -679,7 +693,7 @@ class EventsWorkerStore(SQLBaseStore): # cancellations, since multiple `_get_events_from_cache_or_db` calls can # reuse the same fetch. missing_events: Dict[str, EventCacheEntry] = await delay_cancellation( - get_missing_events_from_db() + get_missing_events_from_cache_or_db() ) event_entry_map.update(missing_events) @@ -754,7 +768,54 @@ class EventsWorkerStore(SQLBaseStore): async def _get_events_from_cache( self, events: Iterable[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: - """Fetch events from the caches. + """Fetch events from the caches, both in memory and any external. + + May return rejected events. + + Args: + events: list of event_ids to fetch + update_metrics: Whether to update the cache hit ratio metrics + """ + event_map = self._get_events_from_local_cache( + events, update_metrics=update_metrics + ) + + missing_event_ids = (e for e in events if e not in event_map) + event_map.update( + await self._get_events_from_external_cache( + events=missing_event_ids, + update_metrics=update_metrics, + ) + ) + + return event_map + + async def _get_events_from_external_cache( + self, events: Iterable[str], update_metrics: bool = True + ) -> Dict[str, EventCacheEntry]: + """Fetch events from any configured external cache. + + May return rejected events. + + Args: + events: list of event_ids to fetch + update_metrics: Whether to update the cache hit ratio metrics + """ + event_map = {} + + for event_id in events: + ret = await self._get_event_cache.get_external( + (event_id,), None, update_metrics=update_metrics + ) + if ret: + event_map[event_id] = ret + + return event_map + + def _get_events_from_local_cache( + self, events: Iterable[str], update_metrics: bool = True + ) -> Dict[str, EventCacheEntry]: + """Fetch events from the local, in memory, caches. May return rejected events. @@ -766,7 +827,7 @@ class EventsWorkerStore(SQLBaseStore): for event_id in events: # First check if it's in the event cache - ret = await self._get_event_cache.get( + ret = self._get_event_cache.get_local( (event_id,), None, update_metrics=update_metrics ) if ret: @@ -788,7 +849,7 @@ class EventsWorkerStore(SQLBaseStore): # We add the entry back into the cache as we want to keep # recently queried events in the cache. - await self._get_event_cache.set((event_id,), cache_entry) + self._get_event_cache.set_local((event_id,), cache_entry) return event_map diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e2cccc688c..93ff4816c8 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -896,7 +896,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # We don't update the event cache hit ratio as it completely throws off # the hit ratio counts. 
After all, we don't populate the cache if we # miss it here - event_map = await self._get_events_from_cache( + event_map = self._get_events_from_local_cache( member_event_ids, update_metrics=False ) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index b3bdedb04c..aa93109d13 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -834,9 +834,26 @@ class AsyncLruCache(Generic[KT, VT]): ) -> Optional[VT]: return self._lru_cache.get(key, update_metrics=update_metrics) + async def get_external( + self, + key: KT, + default: Optional[T] = None, + update_metrics: bool = True, + ) -> Optional[VT]: + # This method should fetch from any configured external cache, in this case noop. + return None + + def get_local( + self, key: KT, default: Optional[T] = None, update_metrics: bool = True + ) -> Optional[VT]: + return self._lru_cache.get(key, update_metrics=update_metrics) + async def set(self, key: KT, value: VT) -> None: self._lru_cache.set(key, value) + def set_local(self, key: KT, value: VT) -> None: + self._lru_cache.set(key, value) + async def invalidate(self, key: KT) -> None: # This method should invalidate any external cache and then invalidate the LruCache. return self._lru_cache.invalidate(key) -- cgit 1.4.1 From e9e6aacfbe016922ed6a25071c0e9ce77334b4e8 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 4 Aug 2022 15:27:04 +0000 Subject: Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. (#13416) --- changelog.d/13416.misc | 1 + synapse/federation/federation_server.py | 17 +++++++++++++++++ synapse/handlers/federation.py | 17 +++++++++++++++++ 3 files changed, 35 insertions(+) create mode 100644 changelog.d/13416.misc diff --git a/changelog.d/13416.misc b/changelog.d/13416.misc new file mode 100644 index 0000000000..2904e73376 --- /dev/null +++ b/changelog.d/13416.misc @@ -0,0 +1 @@ +Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 1d60137411..db4b83a505 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -843,8 +843,25 @@ class FederationServer(FederationBase): Codes.BAD_JSON, ) + # Note that get_room_version throws if the room does not exist here. room_version = await self.store.get_room_version(room_id) + if await self.store.is_partial_state_room(room_id): + # If our server is still only partially joined, we can't give a complete + # response to /send_join, /send_knock or /send_leave. + # This is because we will not be able to provide the server list (for partial + # joins) or the full state (for full joins). + # Return a 404 as we would if we weren't in the room at all. 
+ logger.info( + f"Rejecting /send_{membership_type} to %s because it's a partial state room", + room_id, + ) + raise SynapseError( + 404, + f"Unable to handle /send_{membership_type} right now; this server is not fully joined.", + errcode=Codes.NOT_FOUND, + ) + if membership_type == Membership.KNOCK and not room_version.msc2403_knocking: raise SynapseError( 403, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 30f1585a85..5042236742 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -754,6 +754,23 @@ class FederationHandler: # (and return a 404 otherwise) room_version = await self.store.get_room_version(room_id) + if await self.store.is_partial_state_room(room_id): + # If our server is still only partially joined, we can't give a complete + # response to /make_join, so return a 404 as we would if we weren't in the + # room at all. + # The main reason we can't respond properly is that we need to know about + # the auth events for the join event that we would return. + # We also should not bother entertaining the /make_join since we cannot + # handle the /send_join. + logger.info( + "Rejecting /make_join to %s because it's a partial state room", room_id + ) + raise SynapseError( + 404, + "Unable to handle /make_join right now; this server is not fully joined.", + errcode=Codes.NOT_FOUND, + ) + # now check that we are *still* in the room is_in_room = await self._event_auth_handler.check_host_in_room( room_id, self.server_name -- cgit 1.4.1 From 96d92156d0f820224f68092e72d6089dceef715a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 4 Aug 2022 17:45:01 +0100 Subject: Update type of `EventContext.rejected` (#13460) --- changelog.d/13460.misc | 1 + synapse/events/snapshot.py | 7 +++---- synapse/storage/databases/main/events.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 changelog.d/13460.misc diff --git a/changelog.d/13460.misc b/changelog.d/13460.misc new file mode 100644 index 0000000000..f9e9de219d --- /dev/null +++ b/changelog.d/13460.misc @@ -0,0 +1 @@ +Update type of `EventContext.rejected`. diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index b700cbbfa1..d3c8083e4a 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -11,11 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, List, Optional, Tuple import attr from frozendict import frozendict -from typing_extensions import Literal from synapse.appservice import ApplicationService from synapse.events import EventBase @@ -33,7 +32,7 @@ class EventContext: Holds information relevant to persisting an event Attributes: - rejected: A rejection reason if the event was rejected, else False + rejected: A rejection reason if the event was rejected, else None _state_group: The ID of the state group for this event. 
Note that state events are persisted with a state group which includes the new event, so this is @@ -85,7 +84,7 @@ class EventContext: """ _storage: "StorageControllers" - rejected: Union[Literal[False], str] = False + rejected: Optional[str] = None _state_group: Optional[int] = None state_group_before_event: Optional[int] = None _state_delta_due_to_event: Optional[StateMap[str]] = None diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1f600f1190..5560b38a48 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1490,7 +1490,7 @@ class PersistEventsStore: event.sender, "url" in event.content and isinstance(event.content["url"], str), event.get_state_key(), - context.rejected or None, + context.rejected, ) for event, context in events_and_contexts ), -- cgit 1.4.1 From ec24813220f9d54108924dc04aecd24555277b99 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 4 Aug 2022 15:24:44 -0400 Subject: Improve comments (& avoid a duplicate query) in push actions processing. (#13455) * Adds docstrings and inline comments. * Formats SQL queries using triple quoted strings. * Minor formatting changes. * Avoid fetching `event_push_summary_stream_ordering` multiple times in the same transactions. --- changelog.d/13455.misc | 1 + .../storage/databases/main/event_push_actions.py | 282 ++++++++++++--------- 2 files changed, 159 insertions(+), 124 deletions(-) create mode 100644 changelog.d/13455.misc diff --git a/changelog.d/13455.misc b/changelog.d/13455.misc new file mode 100644 index 0000000000..17462c56f3 --- /dev/null +++ b/changelog.d/13455.misc @@ -0,0 +1 @@ +Add some comments about how event push actions are stored. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index dd2627037c..5ddddb1cf3 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -265,7 +265,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas counts.notify_count += row[1] counts.unread_count += row[2] - # Next we need to count highlights, which aren't summarized + # Next we need to count highlights, which aren't summarised sql = """ SELECT COUNT(*) FROM event_push_actions WHERE user_id = ? @@ -280,7 +280,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # Finally we need to count push actions that aren't included in the # summary returned above, e.g. recent events that haven't been - # summarized yet, or the summary is empty due to a recent read receipt. + # summarised yet, or the summary is empty due to a recent read receipt. stream_ordering = max(stream_ordering, summary_stream_ordering) notify_count, unread_count = self._get_notif_unread_count_for_user_room( txn, room_id, user_id, stream_ordering @@ -304,6 +304,17 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas Does not consult `event_push_summary` table, which may include push actions that have been deleted from `event_push_actions` table. + + Args: + txn: The database transaction. + room_id: The room ID to get unread counts for. + user_id: The user ID to get unread counts for. + stream_ordering: The (exclusive) minimum stream ordering to consider. + max_stream_ordering: The (inclusive) maximum stream ordering to consider. + If this is not given, then no maximum is applied. 
+ + Return: + A tuple of the notif count and unread count in the given range. """ # If there have been no events in the room since the stream ordering, @@ -383,27 +394,27 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ) -> List[Tuple[str, str, int, str, bool]]: # find rooms that have a read receipt in them and return the next # push actions - sql = ( - "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " ep.highlight " - " FROM (" - " SELECT room_id," - " MAX(stream_ordering) as stream_ordering" - " FROM events" - " INNER JOIN receipts_linearized USING (room_id, event_id)" - " WHERE receipt_type = 'm.read' AND user_id = ?" - " GROUP BY room_id" - ") AS rl," - " event_push_actions AS ep" - " WHERE" - " ep.room_id = rl.room_id" - " AND ep.stream_ordering > rl.stream_ordering" - " AND ep.user_id = ?" - " AND ep.stream_ordering > ?" - " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" - " ORDER BY ep.stream_ordering ASC LIMIT ?" - ) + sql = """ + SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, + ep.highlight + FROM ( + SELECT room_id, + MAX(stream_ordering) as stream_ordering + FROM events + INNER JOIN receipts_linearized USING (room_id, event_id) + WHERE receipt_type = 'm.read' AND user_id = ? + GROUP BY room_id + ) AS rl, + event_push_actions AS ep + WHERE + ep.room_id = rl.room_id + AND ep.stream_ordering > rl.stream_ordering + AND ep.user_id = ? + AND ep.stream_ordering > ? + AND ep.stream_ordering <= ? + AND ep.notif = 1 + ORDER BY ep.stream_ordering ASC LIMIT ? + """ args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) @@ -418,23 +429,23 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_no_receipt( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool]]: - sql = ( - "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " ep.highlight " - " FROM event_push_actions AS ep" - " INNER JOIN events AS e USING (room_id, event_id)" - " WHERE" - " ep.room_id NOT IN (" - " SELECT room_id FROM receipts_linearized" - " WHERE receipt_type = 'm.read' AND user_id = ?" - " GROUP BY room_id" - " )" - " AND ep.user_id = ?" - " AND ep.stream_ordering > ?" - " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" - " ORDER BY ep.stream_ordering ASC LIMIT ?" - ) + sql = """ + SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, + ep.highlight + FROM event_push_actions AS ep + INNER JOIN events AS e USING (room_id, event_id) + WHERE + ep.room_id NOT IN ( + SELECT room_id FROM receipts_linearized + WHERE receipt_type = 'm.read' AND user_id = ? + GROUP BY room_id + ) + AND ep.user_id = ? + AND ep.stream_ordering > ? + AND ep.stream_ordering <= ? + AND ep.notif = 1 + ORDER BY ep.stream_ordering ASC LIMIT ? 
+ """ args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) @@ -490,28 +501,28 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_after_receipt( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool, int]]: - sql = ( - "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " ep.highlight, e.received_ts" - " FROM (" - " SELECT room_id," - " MAX(stream_ordering) as stream_ordering" - " FROM events" - " INNER JOIN receipts_linearized USING (room_id, event_id)" - " WHERE receipt_type = 'm.read' AND user_id = ?" - " GROUP BY room_id" - ") AS rl," - " event_push_actions AS ep" - " INNER JOIN events AS e USING (room_id, event_id)" - " WHERE" - " ep.room_id = rl.room_id" - " AND ep.stream_ordering > rl.stream_ordering" - " AND ep.user_id = ?" - " AND ep.stream_ordering > ?" - " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" - " ORDER BY ep.stream_ordering DESC LIMIT ?" - ) + sql = """ + SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, + ep.highlight, e.received_ts + FROM ( + SELECT room_id, + MAX(stream_ordering) as stream_ordering + FROM events + INNER JOIN receipts_linearized USING (room_id, event_id) + WHERE receipt_type = 'm.read' AND user_id = ? + GROUP BY room_id + ) AS rl, + event_push_actions AS ep + INNER JOIN events AS e USING (room_id, event_id) + WHERE + ep.room_id = rl.room_id + AND ep.stream_ordering > rl.stream_ordering + AND ep.user_id = ? + AND ep.stream_ordering > ? + AND ep.stream_ordering <= ? + AND ep.notif = 1 + ORDER BY ep.stream_ordering DESC LIMIT ? + """ args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) @@ -526,23 +537,23 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_no_receipt( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool, int]]: - sql = ( - "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," - " ep.highlight, e.received_ts" - " FROM event_push_actions AS ep" - " INNER JOIN events AS e USING (room_id, event_id)" - " WHERE" - " ep.room_id NOT IN (" - " SELECT room_id FROM receipts_linearized" - " WHERE receipt_type = 'm.read' AND user_id = ?" - " GROUP BY room_id" - " )" - " AND ep.user_id = ?" - " AND ep.stream_ordering > ?" - " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" - " ORDER BY ep.stream_ordering DESC LIMIT ?" - ) + sql = """ + SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, + ep.highlight, e.received_ts + FROM event_push_actions AS ep + INNER JOIN events AS e USING (room_id, event_id) + WHERE + ep.room_id NOT IN ( + SELECT room_id FROM receipts_linearized + WHERE receipt_type = 'm.read' AND user_id = ? + GROUP BY room_id + ) + AND ep.user_id = ? + AND ep.stream_ordering > ? + AND ep.stream_ordering <= ? + AND ep.notif = 1 + ORDER BY ep.stream_ordering DESC LIMIT ? + """ args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) @@ -769,12 +780,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # [10, , 20], we should treat this as being equivalent to # [10, 10, 20]. # - sql = ( - "SELECT received_ts FROM events" - " WHERE stream_ordering <= ?" 
- " ORDER BY stream_ordering DESC" - " LIMIT 1" - ) + sql = """ + SELECT received_ts FROM events + WHERE stream_ordering <= ? + ORDER BY stream_ordering DESC + LIMIT 1 + """ while range_end - range_start > 0: middle = (range_end + range_start) // 2 @@ -802,14 +813,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas self, stream_ordering: int ) -> Optional[int]: def f(txn: LoggingTransaction) -> Optional[Tuple[int]]: - sql = ( - "SELECT e.received_ts" - " FROM event_push_actions AS ep" - " JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id" - " WHERE ep.stream_ordering > ? AND notif = 1" - " ORDER BY ep.stream_ordering ASC" - " LIMIT 1" - ) + sql = """ + SELECT e.received_ts + FROM event_push_actions AS ep + JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id + WHERE ep.stream_ordering > ? AND notif = 1 + ORDER BY ep.stream_ordering ASC + LIMIT 1 + """ txn.execute(sql, (stream_ordering,)) return cast(Optional[Tuple[int]], txn.fetchone()) @@ -858,10 +869,13 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas Any push actions which predate the user's most recent read receipt are now redundant, so we can remove them from `event_push_actions` and update `event_push_summary`. + + Returns true if all new receipts have been processed. """ limit = 100 + # The (inclusive) receipt stream ID that was previously processed.. min_receipts_stream_id = self.db_pool.simple_select_one_onecol_txn( txn, table="event_push_summary_last_receipt_stream_id", @@ -871,6 +885,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas max_receipts_stream_id = self._receipts_id_gen.get_current_token() + # The (inclusive) event stream ordering that was previously summarised. + old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( + txn, + table="event_push_summary_stream_ordering", + keyvalues={}, + retcol="stream_ordering", + ) + sql = """ SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering FROM receipts_linearized AS r @@ -895,13 +917,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ) rows = txn.fetchall() - old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( - txn, - table="event_push_summary_stream_ordering", - keyvalues={}, - retcol="stream_ordering", - ) - # For each new read receipt we delete push actions from before it and # recalculate the summary. for _, room_id, user_id, stream_ordering in rows: @@ -920,10 +935,13 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (room_id, user_id, stream_ordering), ) + # Fetch the notification counts between the stream ordering of the + # latest receipt and what was previously summarised. notif_count, unread_count = self._get_notif_unread_count_for_user_room( txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering ) + # Replace the previous summary with the new counts. self.db_pool.simple_upsert_txn( txn, table="event_push_summary", @@ -956,10 +974,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas return len(rows) < limit def _rotate_notifs_txn(self, txn: LoggingTransaction) -> bool: - """Archives older notifications into event_push_summary. Returns whether - the archiving process has caught up or not. + """Archives older notifications (from event_push_actions) into event_push_summary. + + Returns whether the archiving process has caught up or not. 
""" + # The (inclusive) event stream ordering that was previously summarised. old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( txn, table="event_push_summary_stream_ordering", @@ -974,7 +994,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas SELECT stream_ordering FROM event_push_actions WHERE stream_ordering > ? ORDER BY stream_ordering ASC LIMIT 1 OFFSET ? - """, + """, (old_rotate_stream_ordering, self._rotate_count), ) stream_row = txn.fetchone() @@ -993,19 +1013,31 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas logger.info("Rotating notifications up to: %s", rotate_to_stream_ordering) - self._rotate_notifs_before_txn(txn, rotate_to_stream_ordering) + self._rotate_notifs_before_txn( + txn, old_rotate_stream_ordering, rotate_to_stream_ordering + ) return caught_up def _rotate_notifs_before_txn( - self, txn: LoggingTransaction, rotate_to_stream_ordering: int + self, + txn: LoggingTransaction, + old_rotate_stream_ordering: int, + rotate_to_stream_ordering: int, ) -> None: - old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( - txn, - table="event_push_summary_stream_ordering", - keyvalues={}, - retcol="stream_ordering", - ) + """Archives older notifications (from event_push_actions) into event_push_summary. + + Any event_push_actions between old_rotate_stream_ordering (exclusive) and + rotate_to_stream_ordering (inclusive) will be added to the event_push_summary + table. + + Args: + txn: The database transaction. + old_rotate_stream_ordering: The previous maximum event stream ordering. + rotate_to_stream_ordering: The new maximum event stream ordering to summarise. + + Returns whether the archiving process has caught up or not. + """ # Calculate the new counts that should be upserted into event_push_summary sql = """ @@ -1093,9 +1125,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def _remove_old_push_actions_that_have_rotated( self, ) -> None: - """Clear out old push actions that have been summarized.""" + """Clear out old push actions that have been summarised.""" - # We want to clear out anything that older than a day that *has* already + # We want to clear out anything that is older than a day that *has* already # been rotated. rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol( table="event_push_summary_stream_ordering", @@ -1119,7 +1151,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas SELECT stream_ordering FROM event_push_actions WHERE stream_ordering <= ? AND highlight = 0 ORDER BY stream_ordering ASC LIMIT 1 OFFSET ? - """, + """, ( max_stream_ordering_to_delete, batch_size, @@ -1215,16 +1247,18 @@ class EventPushActionsStore(EventPushActionsWorkerStore): # NB. This assumes event_ids are globally unique since # it makes the query easier to index - sql = ( - "SELECT epa.event_id, epa.room_id," - " epa.stream_ordering, epa.topological_ordering," - " epa.actions, epa.highlight, epa.profile_tag, e.received_ts" - " FROM event_push_actions epa, events e" - " WHERE epa.event_id = e.event_id" - " AND epa.user_id = ? %s" - " AND epa.notif = 1" - " ORDER BY epa.stream_ordering DESC" - " LIMIT ?" % (before_clause,) + sql = """ + SELECT epa.event_id, epa.room_id, + epa.stream_ordering, epa.topological_ordering, + epa.actions, epa.highlight, epa.profile_tag, e.received_ts + FROM event_push_actions epa, events e + WHERE epa.event_id = e.event_id + AND epa.user_id = ? 
%s + AND epa.notif = 1 + ORDER BY epa.stream_ordering DESC + LIMIT ? + """ % ( + before_clause, ) txn.execute(sql, args) return cast( -- cgit 1.4.1 From 860fdd90985762cc8cf40d073f4ab63564b9fcc0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 4 Aug 2022 14:29:41 -0500 Subject: Fix `@tag_args` being off-by-one (ahead) (#13452) Fix @tag_args being off-by-one (ahead) Example: ``` argspec.args=[ 'self', 'room_id' ] args=( <self>, '!HBehERstyQBxyJDLfR:my.synapse.server' ) ``` --- The previous logic was also flawed and we can end up in a situation like this: ``` argspec.args=['self', 'dest', 'room_id', 'limit', 'extremities'] args=(<self>, 'hs1', '!jAEHKIubyIfuLOdfpY:hs1') ``` From this source: ```py async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] ) -> Optional[List[EventBase]]: ``` And this usage: ```py events = await self._federation_client.backfill( dest, room_id, limit=limit, extremities=extremities ) ``` which would previously cause this error: ``` synapse_main | 2022-08-04 06:13:12,051 - synapse.handlers.federation - 424 - ERROR - GET-5 - Failed to backfill from hs1 because tuple index out of range synapse_main | Traceback (most recent call last): synapse_main | File "/usr/local/lib/python3.9/site-packages/synapse/handlers/federation.py", line 392, in try_backfill synapse_main | await self._federation_event_handler.backfill( synapse_main | File "/usr/local/lib/python3.9/site-packages/synapse/logging/tracing.py", line 828, in _wrapper synapse_main | return await func(*args, **kwargs) synapse_main | File "/usr/local/lib/python3.9/site-packages/synapse/handlers/federation_event.py", line 593, in backfill synapse_main | events = await self._federation_client.backfill( synapse_main | File "/usr/local/lib/python3.9/site-packages/synapse/logging/tracing.py", line 828, in _wrapper synapse_main | return await func(*args, **kwargs) synapse_main | File "/usr/local/lib/python3.9/site-packages/synapse/logging/tracing.py", line 827, in _wrapper synapse_main | with wrapping_logic(func, *args, **kwargs): synapse_main | File "/usr/local/lib/python3.9/contextlib.py", line 119, in __enter__ synapse_main | return next(self.gen) synapse_main | File "/usr/local/lib/python3.9/site-packages/synapse/logging/tracing.py", line 922, in _wrapping_logic synapse_main | set_attribute("ARG_" + arg, str(args[i + 1])) # type: ignore[index] synapse_main | IndexError: tuple index out of range ``` --- changelog.d/13452.misc | 1 + synapse/logging/opentracing.py | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13452.misc diff --git a/changelog.d/13452.misc b/changelog.d/13452.misc new file mode 100644 index 0000000000..13d1523de2 --- /dev/null +++ b/changelog.d/13452.misc @@ -0,0 +1 @@ +Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index c1aa205eed..fa3f76c27f 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -901,6 +901,11 @@ def trace(func: Callable[P, R]) -> Callable[P, R]: def tag_args(func: Callable[P, R]) -> Callable[P, R]: """ Tags all of the args to the active span. + + Args: + func: `func` is assumed to be a method taking a `self` parameter, or a + `classmethod` taking a `cls` parameter. In either case, a tag is not + created for this parameter.
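+
+ For example, `@tag_args` on `async def backfill(self, dest, room_id, limit)`
+ called as `obj.backfill("hs1", "!room:hs1", limit=50)` sets the tags
+ `ARG_dest` and `ARG_room_id`; any remaining positional and keyword
+ arguments are recorded in the `args` and `kwargs` tags.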
""" if not opentracing: @@ -909,8 +914,14 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: @wraps(func) def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R: argspec = inspect.getfullargspec(func) - for i, arg in enumerate(argspec.args[1:]): - set_tag("ARG_" + arg, str(args[i])) # type: ignore[index] + # We use `[1:]` to skip the `self` object reference and `start=1` to + # make the index line up with `argspec.args`. + # + # FIXME: We could update this handle any type of function by ignoring the + # first argument only if it's named `self` or `cls`. This isn't fool-proof + # but handles the idiomatic cases. + for i, arg in enumerate(args[1:], start=1): # type: ignore[index] + set_tag("ARG_" + argspec.args[i], str(arg)) set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] set_tag("kwargs", str(kwargs)) return func(*args, **kwargs) -- cgit 1.4.1 From b6a6bb4027c1a812361ac127b8c5ea1226be295d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Aug 2022 20:38:08 +0100 Subject: Add comments about how event push actions are stored. (#13445) --- changelog.d/13445.misc | 1 + .../storage/databases/main/event_push_actions.py | 61 ++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 changelog.d/13445.misc diff --git a/changelog.d/13445.misc b/changelog.d/13445.misc new file mode 100644 index 0000000000..17462c56f3 --- /dev/null +++ b/changelog.d/13445.misc @@ -0,0 +1 @@ +Add some comments about how event push actions are stored. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 5ddddb1cf3..5db70f9a60 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -12,6 +12,67 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +"""Responsible for storing and fetching push actions / notifications. + +There are two main uses for push actions: + 1. Sending out push to a user's device; and + 2. Tracking per-room per-user notification counts (used in sync requests). + +For the former we simply use the `event_push_actions` table, which contains all +the calculated actions for a given user (which were calculated by the +`BulkPushRuleEvaluator`). + +For the latter we could simply count the number of rows in `event_push_actions` +table for a given room/user, but in practice this is *very* heavyweight when +there were a large number of notifications (due to e.g. the user never reading a +room). Plus, keeping all push actions indefinitely uses a lot of disk space. + +To fix these issues, we add a new table `event_push_summary` that tracks +per-user per-room counts of all notifications that happened before a stream +ordering S. Thus, to get the notification count for a user / room we can simply +query a single row in `event_push_summary` and count the number of rows in +`event_push_actions` with a stream ordering larger than S (and as long as S is +"recent", the number of rows needing to be scanned will be small). + +The `event_push_summary` table is updated via a background job that periodically +chooses a new stream ordering S' (usually the latest stream ordering), counts +all notifications in `event_push_actions` between the existing S and S', and +adds them to the existing counts in `event_push_summary`. 
+ +This allows us to delete old rows from `event_push_actions` once those rows have +been counted and added to `event_push_summary` (we call this process +"rotation"). + + +We need to handle when a user sends a read receipt to the room. Again this is +done as a background process. For each receipt we clear the row in +`event_push_summary` and count the number of notifications in +`event_push_actions` that happened after the receipt but before S, and insert +that count into `event_push_summary`. (If the receipt happened *after* S then we +simply clear the `event_push_summary`.) + +Note that it's possible that if the read receipt is for an old event, the relevant +`event_push_actions` rows will have been rotated and we get the wrong count +(it'll be too low). We accept this as a rare edge case that is unlikely to +impact the user much (since the vast majority of read receipts will be for the +latest event). + +The last complication is to handle the race where we request the notification +counts after a user sends a read receipt into the room, but *before* the +background update handles the receipt (without any special handling the counts +would be outdated). We fix this by including in `event_push_summary` the read +receipt we used when updating `event_push_summary`, and every time we query the +table we check if that matches the most recent read receipt in the room. If yes, +we continue as above; if not, we simply query the `event_push_actions` table +directly. + +Since read receipts are almost always for recent events, scanning the +`event_push_actions` table in this case is unlikely to be a problem. Even if it +is a problem, it is temporary until the background job handles the new read +receipt. +""" + import logging from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast -- cgit 1.4.1 From 026ac4486cca13d12200667ed8237e22c37edf12 Mon Sep 17 00:00:00 2001 From: Matt C <96466754+buffless-matt@users.noreply.github.com> Date: Fri, 5 Aug 2022 19:37:58 +1000 Subject: Update module API "update room membership" method to allow for remote joins (#13441) Co-authored-by: MattC Co-authored-by: Brendan Abolivier --- changelog.d/13441.feature | 1 + synapse/module_api/__init__.py | 8 ++++---- tests/module_api/test_api.py | 29 +++++++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13441.feature diff --git a/changelog.d/13441.feature b/changelog.d/13441.feature new file mode 100644 index 0000000000..3a4ae8bf01 --- /dev/null +++ b/changelog.d/13441.feature @@ -0,0 +1 @@ +Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 71145870ee..87ba154cb7 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -929,10 +929,12 @@ class ModuleApi: room_id: str, new_membership: str, content: Optional[JsonDict] = None, + remote_room_hosts: Optional[List[str]] = None, ) -> EventBase: """Updates the membership of a user to the given value. Added in Synapse v1.46.0. + Changed in Synapse v1.65.0: Added the 'remote_room_hosts' parameter. Args: sender: The user performing the membership change. Must be a user local to @@ -946,6 +948,7 @@ class ModuleApi: https://spec.matrix.org/unstable/client-server-api/#mroommember for the list of allowed values. content: Additional values to include in the resulting event's content.
+ remote_room_hosts: Remote servers to use for remote joins/knocks/etc. Returns: The newly created membership event. @@ -1005,15 +1008,12 @@ class ModuleApi: room_id=room_id, action=new_membership, content=content, + remote_room_hosts=remote_room_hosts, ) # Try to retrieve the resulting event. event = await self._hs.get_datastores().main.get_event(event_id) - # update_membership is supposed to always return after the event has been - # successfully persisted. - assert event is not None - return event async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase: diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 9bf95472e1..106159fa65 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -16,6 +16,7 @@ from unittest.mock import Mock from twisted.internet import defer from synapse.api.constants import EduTypes, EventTypes +from synapse.api.errors import NotFoundError from synapse.events import EventBase from synapse.federation.units import Transaction from synapse.handlers.presence import UserPresenceState @@ -532,6 +533,34 @@ class ModuleApiTestCase(HomeserverTestCase): self.assertEqual(res["displayname"], "simone") self.assertIsNone(res["avatar_url"]) + def test_update_room_membership_remote_join(self): + """Test that the module API can join a remote room.""" + # Necessary to fake a remote join. + fake_stream_id = 1 + mocked_remote_join = simple_async_mock( + return_value=("fake-event-id", fake_stream_id) + ) + self.hs.get_room_member_handler()._remote_join = mocked_remote_join + fake_remote_host = f"{self.module_api.server_name}-remote" + + # Given that the join is to be faked, we expect the relevant join event not to + # be persisted and the module API method to raise a NotFoundError. + self.get_failure( + defer.ensureDeferred( + self.module_api.update_room_membership( + sender=f"@user:{self.module_api.server_name}", + target=f"@user:{self.module_api.server_name}", + room_id=f"!nonexistent:{fake_remote_host}", + new_membership="join", + remote_room_hosts=[fake_remote_host], + ) + ), + NotFoundError, + ) + + # Check that a remote join was attempted. + self.assertEqual(mocked_remote_join.call_count, 1) + def test_get_room_state(self): """Tests that a module can retrieve the state of a room through the module API.""" user_id = self.register_user("peter", "hackme") -- cgit 1.4.1 From 3d2cabf9669e27016e6c062313709e88a50697bb Mon Sep 17 00:00:00 2001 From: Julian-Samuel Gebühr Date: Fri, 5 Aug 2022 13:15:35 +0200 Subject: Mark token-authenticated-registration API as not-experimental (#11897) --- changelog.d/11897.doc | 1 + docs/usage/administration/admin_api/registration_tokens.md | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11897.doc diff --git a/changelog.d/11897.doc b/changelog.d/11897.doc new file mode 100644 index 0000000000..d86b20f53d --- /dev/null +++ b/changelog.d/11897.doc @@ -0,0 +1 @@ +Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s.
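For context, the admin API documented in the page below only works when token-gated registration is switched on. A minimal `homeserver.yaml` sketch; the option name is taken from the page itself, and the value shown is illustrative:

```yaml
# Require a valid registration token when registering new accounts.
registration_requires_token: true
```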
diff --git a/docs/usage/administration/admin_api/registration_tokens.md b/docs/usage/administration/admin_api/registration_tokens.md index 13d5eb75e9..90cbc21125 100644 --- a/docs/usage/administration/admin_api/registration_tokens.md +++ b/docs/usage/administration/admin_api/registration_tokens.md @@ -2,11 +2,11 @@ This API allows you to manage tokens which can be used to authenticate registration requests, as proposed in -[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md). +[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md) +and stabilised in version 1.2 of the Matrix specification. To use it, you will need to enable the `registration_requires_token` config option, and authenticate by providing an `access_token` for a server admin: -see [Admin API](../../usage/administration/admin_api). -Note that this API is still experimental; not all clients may support it yet. +see [Admin API](../admin_api). ## Registration token objects -- cgit 1.4.1 From e2ed1b7155bbd38d4a2752073056c112464b3644 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 5 Aug 2022 16:59:09 +0200 Subject: Use literals in place of `HTTPStatus` constants in tests (#13463) --- changelog.d/13463.misc | 1 + tests/federation/test_complexity.py | 5 +- tests/federation/transport/test_knocking.py | 5 +- tests/handlers/test_deactivate_account.py | 3 +- tests/handlers/test_message.py | 2 +- tests/handlers/test_room_member.py | 3 +- tests/http/server/_base.py | 3 +- tests/rest/client/test_filter.py | 14 +-- tests/rest/client/test_login.py | 127 ++++++++++++++-------------- tests/rest/client/test_redactions.py | 4 +- tests/rest/client/test_register.py | 94 ++++++++++---------- tests/rest/client/test_report_event.py | 4 +- tests/rest/client/test_third_party_rules.py | 22 ++--- tests/rest/client/utils.py | 28 +++--- tests/rest/test_health.py | 4 +- tests/rest/test_well_known.py | 12 ++- tests/test_server.py | 26 +++--- tests/test_terms_auth.py | 6 +- 18 files changed, 172 insertions(+), 191 deletions(-) create mode 100644 changelog.d/13463.misc diff --git a/changelog.d/13463.misc b/changelog.d/13463.misc new file mode 100644 index 0000000000..a4c8691144 --- /dev/null +++ b/changelog.d/13463.misc @@ -0,0 +1 @@ +Use literals in place of `HTTPStatus` constants in tests. diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index c6dd99316a..9f1115dd23 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from http import HTTPStatus from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError @@ -51,7 +50,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEqual(HTTPStatus.OK, channel.code) + self.assertEqual(200, channel.code) complexity = channel.json_body["v1"] self.assertTrue(complexity > 0, complexity) @@ -63,7 +62,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEqual(HTTPStatus.OK, channel.code) + self.assertEqual(200, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index 0d048207b7..d21c11b716 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict -from http import HTTPStatus from typing import Dict, List from synapse.api.constants import EventTypes, JoinRules, Membership @@ -256,7 +255,7 @@ class FederationKnockingTestCase( RoomVersions.V7.identifier, ), ) - self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # Note: We don't expect the knock membership event to be sent over federation as # part of the stripped room state, as the knocking homeserver already has that @@ -294,7 +293,7 @@ class FederationKnockingTestCase( % (room_id, signed_knock_event.event_id), signed_knock_event_json, ) - self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # Check that we got the stripped room state in return room_state_events = channel.json_body["knock_state_events"] diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index 7586e472b5..ff9f2e8edb 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from http import HTTPStatus from typing import Any, Dict from twisted.test.proto_helpers import MemoryReactor @@ -58,7 +57,7 @@ class DeactivateAccountTestCase(HomeserverTestCase): access_token=self.token, ) - self.assertEqual(req.code, HTTPStatus.OK, req) + self.assertEqual(req.code, 200, req) def test_global_account_data_deleted_upon_deactivation(self) -> None: """ diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 44da96c792..986b50ce0c 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -314,4 +314,4 @@ class ServerAclValidationTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", path, content={}, access_token=self.access_token ) - self.assertEqual(int(channel.result["code"]), 403) + self.assertEqual(channel.code, 403) diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 254e7e4b80..b4e1405aee 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -1,4 +1,3 @@ -from http import HTTPStatus from unittest.mock import Mock, patch from twisted.test.proto_helpers import MemoryReactor @@ -260,7 +259,7 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestC f"/_matrix/client/v3/rooms/{self.room_id}/join", access_token=self.bob_token, ) - self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.code, 200, channel.json_body) # wait for join to arrive over replication self.replicate() diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 994d8880b0..5726e60cee 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -15,7 +15,6 @@ import inspect import itertools import logging -from http import HTTPStatus from typing import ( Any, Callable, @@ -78,7 +77,7 @@ def test_disconnect( if expect_cancellation: expected_code = HTTP_STATUS_REQUEST_CANCELLED else: - expected_code = HTTPStatus.OK + expected_code = 200 request = channel.request if channel.is_finished(): diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 823e8ab8c4..afc8d641be 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -43,7 +43,7 @@ class FilterTestCase(unittest.HomeserverTestCase): self.EXAMPLE_FILTER_JSON, ) - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {"filter_id": "0"}) filter = self.get_success( self.store.get_user_filter(user_localpart="apple", filter_id=0) @@ -58,7 +58,7 @@ class FilterTestCase(unittest.HomeserverTestCase): self.EXAMPLE_FILTER_JSON, ) - self.assertEqual(channel.result["code"], b"403") + self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) def test_add_filter_non_local_user(self) -> None: @@ -71,7 +71,7 @@ class FilterTestCase(unittest.HomeserverTestCase): ) self.hs.is_mine = _is_mine - self.assertEqual(channel.result["code"], b"403") + self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) def test_get_filter(self) -> None: @@ -85,7 +85,7 @@ class FilterTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id) ) - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, self.EXAMPLE_FILTER) def test_get_filter_non_existant(self) -> None: @@ -93,7 +93,7 @@ class 
FilterTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id) ) - self.assertEqual(channel.result["code"], b"404") + self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) # Currently invalid params do not have an appropriate errcode @@ -103,7 +103,7 @@ class FilterTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id) ) - self.assertEqual(channel.result["code"], b"400") + self.assertEqual(channel.code, 400) # No ID also returns an invalid_id error def test_get_filter_no_id(self) -> None: @@ -111,4 +111,4 @@ class FilterTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id) ) - self.assertEqual(channel.result["code"], b"400") + self.assertEqual(channel.code, 400) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index a2958f6959..e2a4d98275 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -13,7 +13,6 @@ # limitations under the License. import time import urllib.parse -from http import HTTPStatus from typing import Any, Dict, List, Optional from unittest.mock import Mock from urllib.parse import urlencode @@ -134,10 +133,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", LOGIN_URL, params) if i == 5: - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. @@ -152,7 +151,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) @override_config( { @@ -179,10 +178,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", LOGIN_URL, params) if i == 5: - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. @@ -197,7 +196,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) @override_config( { @@ -224,10 +223,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", LOGIN_URL, params) if i == 5: - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. 
@@ -242,7 +241,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) @override_config({"session_lifetime": "24h"}) def test_soft_logout(self) -> None: @@ -250,7 +249,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # we shouldn't be able to make requests without an access token channel = self.make_request(b"GET", TEST_URL) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN") # log in as normal @@ -261,20 +260,20 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) access_token = channel.json_body["access_token"] device_id = channel.json_body["device_id"] # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + self.assertEqual(channel.code, 401, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -288,7 +287,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # more requests with the expired token should still return a soft-logout self.reactor.advance(3600) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + self.assertEqual(channel.code, 401, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -296,7 +295,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): self._delete_device(access_token_2, "kermit", "monkey", device_id) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + self.assertEqual(channel.code, 401, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], False) @@ -307,7 +306,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request( b"DELETE", "devices/" + device_id, access_token=access_token ) - self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + self.assertEqual(channel.code, 401, channel.result) # check it's a UI-Auth fail self.assertEqual( set(channel.json_body.keys()), @@ -330,7 +329,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): access_token=access_token, content={"auth": auth}, ) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) @override_config({"session_lifetime": "24h"}) def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: @@ -341,20 +340,20 @@ class 
LoginRestServletTestCase(unittest.HomeserverTestCase): # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + self.assertEqual(channel.code, 401, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) # Now try to hard logout this session channel = self.make_request(b"POST", "/logout", access_token=access_token) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) @override_config({"session_lifetime": "24h"}) def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out( @@ -367,20 +366,20 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + self.assertEqual(channel.code, 401, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) # Now try to hard log out all of the user's sessions channel = self.make_request(b"POST", "/logout/all", access_token=access_token) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) def test_login_with_overly_long_device_id_fails(self) -> None: self.register_user("mickey", "cheese") @@ -466,7 +465,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def test_get_login_flows(self) -> None: """GET /login should return password and SSO flows""" channel = self.make_request("GET", "/_matrix/client/r0/login") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) expected_flow_types = [ "m.login.cas", @@ -494,14 +493,14 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): """/login/sso/redirect should redirect to an identity picker""" # first hit the redirect url, which should redirect to our idp picker channel = self._make_sso_redirect_request(None) - self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers uri = location_headers[0] # hitting that picker should give us some HTML channel = self.make_request("GET", uri) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) # parse the form to check it has fields assumed elsewhere in this class html = channel.result["body"].decode("utf-8") @@ -530,7 +529,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): + "&idp=cas", shorthand=False, ) - self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + 
self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers cas_uri = location_headers[0] @@ -555,7 +554,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=saml", ) - self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers saml_uri = location_headers[0] @@ -579,7 +578,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=oidc", ) - self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers oidc_uri = location_headers[0] @@ -606,7 +605,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"}) # that should serve a confirmation page - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) content_type_headers = channel.headers.getRawHeaders("Content-Type") assert content_type_headers self.assertTrue(content_type_headers[-1].startswith("text/html")) @@ -634,7 +633,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): "/login", content={"type": "m.login.token", "token": login_token}, ) - self.assertEqual(chan.code, HTTPStatus.OK, chan.result) + self.assertEqual(chan.code, 200, chan.result) self.assertEqual(chan.json_body["user_id"], "@user1:test") def test_multi_sso_redirect_to_unknown(self) -> None: @@ -643,18 +642,18 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): "GET", "/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz", ) - self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual(channel.code, 400, channel.result) def test_client_idp_redirect_to_unknown(self) -> None: """If the client tries to pick an unknown IdP, return a 404""" channel = self._make_sso_redirect_request("xxx") - self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result) + self.assertEqual(channel.code, 404, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") def test_client_idp_redirect_to_oidc(self) -> None: """If the client pick a known IdP, redirect to it""" channel = self._make_sso_redirect_request("oidc") - self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers oidc_uri = location_headers[0] @@ -765,7 +764,7 @@ class CASTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", cas_ticket_url) # Test that the response is HTML. 
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.code, 200, channel.result) content_type_header_value = "" for header in channel.result.get("headers", []): if header[0] == b"Content-Type": @@ -878,17 +877,17 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_jwt_valid_registered(self) -> None: self.register_user("kermit", "monkey") channel = self.jwt_login({"sub": "kermit"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") def test_login_jwt_valid_unregistered(self) -> None: channel = self.jwt_login({"sub": "frog"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") def test_login_jwt_invalid_signature(self) -> None: channel = self.jwt_login({"sub": "frog"}, "notsecret") - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -897,7 +896,7 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_jwt_expired(self) -> None: channel = self.jwt_login({"sub": "frog", "exp": 864000}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -907,7 +906,7 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_jwt_not_before(self) -> None: now = int(time.time()) channel = self.jwt_login({"sub": "frog", "nbf": now + 3600}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -916,7 +915,7 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_no_sub(self) -> None: channel = self.jwt_login({"username": "root"}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual(channel.json_body["error"], "Invalid JWT") @@ -925,12 +924,12 @@ class JWTTestCase(unittest.HomeserverTestCase): """Test validating the issuer claim.""" # A valid issuer. channel = self.jwt_login({"sub": "kermit", "iss": "test-issuer"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") # An invalid issuer. channel = self.jwt_login({"sub": "kermit", "iss": "invalid"}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -939,7 +938,7 @@ class JWTTestCase(unittest.HomeserverTestCase): # Not providing an issuer. 
channel = self.jwt_login({"sub": "kermit"}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -949,7 +948,7 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_iss_no_config(self) -> None: """Test providing an issuer claim without requiring it in the configuration.""" channel = self.jwt_login({"sub": "kermit", "iss": "invalid"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") @override_config({"jwt_config": {**base_config, "audiences": ["test-audience"]}}) @@ -957,12 +956,12 @@ class JWTTestCase(unittest.HomeserverTestCase): """Test validating the audience claim.""" # A valid audience. channel = self.jwt_login({"sub": "kermit", "aud": "test-audience"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") # An invalid audience. channel = self.jwt_login({"sub": "kermit", "aud": "invalid"}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -971,7 +970,7 @@ class JWTTestCase(unittest.HomeserverTestCase): # Not providing an audience. channel = self.jwt_login({"sub": "kermit"}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -981,7 +980,7 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_aud_no_config(self) -> None: """Test providing an audience without requiring it in the configuration.""" channel = self.jwt_login({"sub": "kermit", "aud": "invalid"}) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -991,20 +990,20 @@ class JWTTestCase(unittest.HomeserverTestCase): def test_login_default_sub(self) -> None: """Test reading user ID from the default subject claim.""" channel = self.jwt_login({"sub": "kermit"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") @override_config({"jwt_config": {**base_config, "subject_claim": "username"}}) def test_login_custom_sub(self) -> None: """Test reading user ID from a custom subject claim.""" channel = self.jwt_login({"username": "frog"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") def test_login_no_token(self) -> None: params = {"type": "org.matrix.login.jwt"} channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual(channel.json_body["error"], "Token 
field for JWT is missing") @@ -1086,12 +1085,12 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): def test_login_jwt_valid(self) -> None: channel = self.jwt_login({"sub": "kermit"}) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") def test_login_jwt_invalid_signature(self) -> None: channel = self.jwt_login({"sub": "frog"}, self.bad_privatekey) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], @@ -1152,7 +1151,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) def test_login_appservice_user_bot(self) -> None: """Test that the appservice bot can use /login""" @@ -1166,7 +1165,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) def test_login_appservice_wrong_user(self) -> None: """Test that non-as users cannot login with the as token""" @@ -1180,7 +1179,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) def test_login_appservice_wrong_as(self) -> None: """Test that as users cannot login with wrong as token""" @@ -1194,7 +1193,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): b"POST", LOGIN_URL, params, access_token=self.another_service.token ) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) def test_login_appservice_no_token(self) -> None: """Test that users must provide a token when using the appservice @@ -1208,7 +1207,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) @skip_unless(HAS_OIDC, "requires OIDC") @@ -1246,7 +1245,7 @@ class UsernamePickerTestCase(HomeserverTestCase): ) # that should redirect to the username picker - self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers picker_url = location_headers[0] @@ -1290,7 +1289,7 @@ class UsernamePickerTestCase(HomeserverTestCase): ("Content-Length", str(len(content))), ], ) - self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result) + self.assertEqual(chan.code, 302, chan.result) location_headers = chan.headers.getRawHeaders("Location") assert location_headers @@ -1300,7 +1299,7 @@ class UsernamePickerTestCase(HomeserverTestCase): path=location_headers[0], custom_headers=[("Cookie", "username_mapping_session=" + session_id)], ) - self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result) + self.assertEqual(chan.code, 302, chan.result) 
location_headers = chan.headers.getRawHeaders("Location") assert location_headers @@ -1325,5 +1324,5 @@ class UsernamePickerTestCase(HomeserverTestCase): "/login", content={"type": "m.login.token", "token": login_token}, ) - self.assertEqual(chan.code, HTTPStatus.OK, chan.result) + self.assertEqual(chan.code, 200, chan.result) self.assertEqual(chan.json_body["user_id"], "@bobby:test") diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 7401b5e0c0..be4c67d68e 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -76,12 +76,12 @@ class RedactionsTestCase(HomeserverTestCase): path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id) channel = self.make_request("POST", path, content={}, access_token=access_token) - self.assertEqual(int(channel.result["code"]), expect_code) + self.assertEqual(channel.code, expect_code) return channel.json_body def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]: channel = self.make_request("GET", "sync", access_token=self.mod_access_token) - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) room_sync = channel.json_body["rooms"]["join"][room_id] return room_sync["timeline"]["events"] diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index f8e64ce6ac..ab4277dd31 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -70,7 +70,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) det_data = {"user_id": user_id, "home_server": self.hs.hostname} self.assertDictContainsSubset(det_data, channel.json_body) @@ -91,7 +91,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.assertEqual(channel.result["code"], b"400", channel.result) + self.assertEqual(channel.code, 400, msg=channel.result) def test_POST_appservice_registration_invalid(self) -> None: self.appservice = None # no application service exists @@ -100,20 +100,20 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) def test_POST_bad_password(self) -> None: request_data = {"username": "kermit", "password": 666} channel = self.make_request(b"POST", self.url, request_data) - self.assertEqual(channel.result["code"], b"400", channel.result) + self.assertEqual(channel.code, 400, msg=channel.result) self.assertEqual(channel.json_body["error"], "Invalid password") def test_POST_bad_username(self) -> None: request_data = {"username": 777, "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) - self.assertEqual(channel.result["code"], b"400", channel.result) + self.assertEqual(channel.code, 400, msg=channel.result) self.assertEqual(channel.json_body["error"], "Invalid username") def test_POST_user_valid(self) -> None: @@ -132,7 +132,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "home_server": self.hs.hostname, "device_id": device_id, } - self.assertEqual(channel.result["code"], b"200", channel.result) + 
self.assertEqual(channel.code, 200, msg=channel.result) self.assertDictContainsSubset(det_data, channel.json_body) @override_config({"enable_registration": False}) @@ -142,7 +142,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", self.url, request_data) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["error"], "Registration has been disabled") self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -153,7 +153,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") det_data = {"home_server": self.hs.hostname, "device_id": "guest_device"} - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertDictContainsSubset(det_data, channel.json_body) def test_POST_disabled_guest_registration(self) -> None: @@ -161,7 +161,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["error"], "Guest access is disabled") @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) @@ -171,16 +171,16 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", url, b"{}") if i == 5: - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting(self) -> None: @@ -194,16 +194,16 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): channel = self.make_request(b"POST", self.url, request_data) if i == 5: - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) @override_config({"registration_requires_token": True}) def test_POST_registration_requires_token(self) -> None: @@ -231,7 +231,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): # Request without auth to get flows and session channel = self.make_request(b"POST", self.url, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) flows = channel.json_body["flows"] # Synapse adds a dummy stage to differentiate flows where otherwise 
one # flow would be a subset of another flow. @@ -248,7 +248,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "session": session, } channel = self.make_request(b"POST", self.url, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) completed = channel.json_body["completed"] self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @@ -263,7 +263,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "home_server": self.hs.hostname, "device_id": device_id, } - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertDictContainsSubset(det_data, channel.json_body) # Check the `completed` counter has been incremented and pending is 0 @@ -293,21 +293,21 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "session": session, } channel = self.make_request(b"POST", self.url, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM) self.assertEqual(channel.json_body["completed"], []) # Test with non-string (invalid) params["auth"]["token"] = 1234 channel = self.make_request(b"POST", self.url, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) self.assertEqual(channel.json_body["completed"], []) # Test with unknown token (invalid) params["auth"]["token"] = "1234" channel = self.make_request(b"POST", self.url, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -361,7 +361,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "session": session2, } channel = self.make_request(b"POST", self.url, params2) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -381,7 +381,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): # Check auth still fails when using token with session2 channel = self.make_request(b"POST", self.url, params2) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -415,7 +415,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "session": session, } channel = self.make_request(b"POST", self.url, params) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -570,7 +570,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): def test_advertised_flows(self) -> None: channel = self.make_request(b"POST", self.url, b"{}") - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) flows = 
channel.json_body["flows"] # with the stock config, we only expect the dummy flow @@ -593,7 +593,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: channel = self.make_request(b"POST", self.url, b"{}") - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) flows = channel.json_body["flows"] self.assertCountEqual( @@ -625,7 +625,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) def test_advertised_flows_no_msisdn_email_required(self) -> None: channel = self.make_request(b"POST", self.url, b"{}") - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) flows = channel.json_body["flows"] # with the stock config, we expect all four combinations of 3pid @@ -797,13 +797,13 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): # endpoint. channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.reactor.advance(datetime.timedelta(weeks=1).total_seconds()) channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual( channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) @@ -823,12 +823,12 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v1/account_validity/validity" request_data = {"user_id": user_id} channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) def test_manual_expire(self) -> None: user_id = self.register_user("kermit", "monkey") @@ -844,12 +844,12 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): "enable_renewal_emails": False, } channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. 
channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual( channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) @@ -868,18 +868,18 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): "enable_renewal_emails": False, } channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # Try to log the user out channel = self.make_request(b"POST", "/logout", access_token=tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # Log the user in again (allowed for expired accounts) tok = self.login("kermit", "monkey") # Try to log out all of the user's sessions channel = self.make_request(b"POST", "/logout/all", access_token=tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): @@ -954,7 +954,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): renewal_token = self.get_success(self.store.get_renewal_token_for_user(user_id)) url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token channel = self.make_request(b"GET", url) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # Check that we're getting HTML back. content_type = channel.headers.getRawHeaders(b"Content-Type") @@ -972,7 +972,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # Move 1 day forward. Try to renew with the same token again. url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token channel = self.make_request(b"GET", url) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # Check that we're getting HTML back. content_type = channel.headers.getRawHeaders(b"Content-Type") @@ -992,14 +992,14 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # succeed. self.reactor.advance(datetime.timedelta(days=3).total_seconds()) channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) def test_renewal_invalid_token(self) -> None: # Hit the renewal endpoint with an invalid token and check that it behaves as # expected, i.e. that it responds with 404 Not Found and the correct HTML. url = "/_matrix/client/unstable/account_validity/renew?token=123" channel = self.make_request(b"GET", url) - self.assertEqual(channel.result["code"], b"404", channel.result) + self.assertEqual(channel.code, 404, msg=channel.result) # Check that we're getting HTML back. 
content_type = channel.headers.getRawHeaders(b"Content-Type") @@ -1023,7 +1023,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(len(self.email_attempts), 1) @@ -1096,7 +1096,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(len(self.email_attempts), 1) @@ -1176,7 +1176,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): b"GET", f"{self.url}?token={token}", ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["valid"], True) def test_GET_token_invalid(self) -> None: @@ -1185,7 +1185,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): b"GET", f"{self.url}?token={token}", ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["valid"], False) @override_config( @@ -1201,10 +1201,10 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): ) if i == 5: - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) @@ -1212,4 +1212,4 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): b"GET", f"{self.url}?token={token}", ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index ad0d0209f7..7cb1017a4a 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -77,6 +77,4 @@ class ReportEventTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.report_path, data, access_token=self.other_user_tok ) - self.assertEqual( - response_status, int(channel.result["code"]), msg=channel.result["body"] - ) + self.assertEqual(response_status, channel.code, msg=channel.result["body"]) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 18a7195409..3325d43a2f 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -155,7 +155,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): {}, access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) callback.assert_called_once() @@ -173,7 +173,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): {}, access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, 403, channel.result) def 
test_third_party_rules_workaround_synapse_errors_pass_through(self) -> None: """ @@ -211,7 +211,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): access_token=self.tok, ) # Check the error code - self.assertEqual(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.code, 429, channel.result) # Check the JSON body has had the `nasty` key injected self.assertEqual( channel.json_body, @@ -260,7 +260,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): {"x": "x"}, access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) event_id = channel.json_body["event_id"] # ... and check that it got modified @@ -269,7 +269,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) ev = channel.json_body self.assertEqual(ev["content"]["x"], "y") @@ -298,7 +298,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): }, access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) orig_event_id = channel.json_body["event_id"] channel = self.make_request( @@ -315,7 +315,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): }, access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) edited_event_id = channel.json_body["event_id"] # ... and check that they both got modified @@ -324,7 +324,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, orig_event_id), access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) ev = channel.json_body self.assertEqual(ev["content"]["body"], "ORIGINAL BODY") @@ -333,7 +333,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, edited_event_id), access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) ev = channel.json_body self.assertEqual(ev["content"]["body"], "EDITED BODY") @@ -379,7 +379,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): }, access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) event_id = channel.json_body["event_id"] @@ -388,7 +388,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), access_token=self.tok, ) - self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) self.assertIn("foo", channel.json_body["content"].keys()) self.assertEqual(channel.json_body["content"]["foo"], "bar") diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 105d418698..dd26145bf8 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -140,7 +140,7 @@ class RestHelper: custom_headers=custom_headers, ) - assert channel.result["code"] == b"%d" % expect_code, 
channel.result + assert channel.code == expect_code, channel.result self.auth_user_id = temp_id if expect_code == HTTPStatus.OK: @@ -213,11 +213,9 @@ class RestHelper: data, ) - assert ( - int(channel.result["code"]) == expect_code - ), "Expected: %d, got: %d, resp: %r" % ( + assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, - int(channel.result["code"]), + channel.code, channel.result["body"], ) @@ -312,11 +310,9 @@ class RestHelper: data, ) - assert ( - int(channel.result["code"]) == expect_code - ), "Expected: %d, got: %d, resp: %r" % ( + assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, - int(channel.result["code"]), + channel.code, channel.result["body"], ) @@ -396,11 +392,9 @@ class RestHelper: custom_headers=custom_headers, ) - assert ( - int(channel.result["code"]) == expect_code - ), "Expected: %d, got: %d, resp: %r" % ( + assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, - int(channel.result["code"]), + channel.code, channel.result["body"], ) @@ -449,11 +443,9 @@ class RestHelper: channel = make_request(self.hs.get_reactor(), self.site, method, path, content) - assert ( - int(channel.result["code"]) == expect_code - ), "Expected: %d, got: %d, resp: %r" % ( + assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, - int(channel.result["code"]), + channel.code, channel.result["body"], ) @@ -545,7 +537,7 @@ class RestHelper: assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, - int(channel.result["code"]), + channel.code, channel.result["body"], ) diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py index da325955f8..c0a2501742 100644 --- a/tests/rest/test_health.py +++ b/tests/rest/test_health.py @@ -11,8 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from http import HTTPStatus - from synapse.rest.health import HealthResource from tests import unittest @@ -26,5 +24,5 @@ class HealthCheckTests(unittest.HomeserverTestCase): def test_health(self) -> None: channel = self.make_request("GET", "/health", shorthand=False) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, 200) self.assertEqual(channel.result["body"], b"OK") diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index d8faafec75..2091b08d89 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -11,8 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
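Several of the touched test files also drop their `from http import HTTPStatus` import in favour of plain integer literals. That change is purely cosmetic: HTTPStatus members are IntEnum values, so both spellings compare equal, as this quick check shows:

from http import HTTPStatus

# HTTPStatus is an IntEnum, so its members compare equal to plain ints.
assert HTTPStatus.OK == 200
assert HTTPStatus.FOUND == 302
assert HTTPStatus.NOT_FOUND == 404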
-from http import HTTPStatus - from twisted.web.resource import Resource from synapse.rest.well_known import well_known_resource @@ -38,7 +36,7 @@ class WellKnownTests(unittest.HomeserverTestCase): "GET", "/.well-known/matrix/client", shorthand=False ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, 200) self.assertEqual( channel.json_body, { @@ -57,7 +55,7 @@ class WellKnownTests(unittest.HomeserverTestCase): "GET", "/.well-known/matrix/client", shorthand=False ) - self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) + self.assertEqual(channel.code, 404) @unittest.override_config( { @@ -71,7 +69,7 @@ class WellKnownTests(unittest.HomeserverTestCase): "GET", "/.well-known/matrix/client", shorthand=False ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, 200) self.assertEqual( channel.json_body, { @@ -87,7 +85,7 @@ class WellKnownTests(unittest.HomeserverTestCase): "GET", "/.well-known/matrix/server", shorthand=False ) - self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(channel.code, 200) self.assertEqual( channel.json_body, {"m.server": "test:443"}, @@ -97,4 +95,4 @@ class WellKnownTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", "/.well-known/matrix/server", shorthand=False ) - self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) + self.assertEqual(channel.code, 404) diff --git a/tests/test_server.py b/tests/test_server.py index 2fe4411401..d2b2d8344a 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -104,7 +104,7 @@ class JsonResourceTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo" ) - self.assertEqual(channel.result["code"], b"500") + self.assertEqual(channel.code, 500) def test_callback_indirect_exception(self) -> None: """ @@ -130,7 +130,7 @@ class JsonResourceTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo" ) - self.assertEqual(channel.result["code"], b"500") + self.assertEqual(channel.code, 500) def test_callback_synapseerror(self) -> None: """ @@ -150,7 +150,7 @@ class JsonResourceTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo" ) - self.assertEqual(channel.result["code"], b"403") + self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["error"], "Forbidden!!one!") self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -174,7 +174,7 @@ class JsonResourceTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foobar" ) - self.assertEqual(channel.result["code"], b"400") + self.assertEqual(channel.code, 400) self.assertEqual(channel.json_body["error"], "Unrecognized request") self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED") @@ -203,7 +203,7 @@ class JsonResourceTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"HEAD", b"/_matrix/foo" ) - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) self.assertNotIn("body", channel.result) @@ -242,7 +242,7 @@ class OptionsResourceTests(unittest.TestCase): def test_unknown_options_request(self) -> None: """An OPTIONS requests to an unknown URL still returns 204 No Content.""" channel = self._make_request(b"OPTIONS", b"/foo/") - self.assertEqual(channel.result["code"], b"204") + self.assertEqual(channel.code, 204) self.assertNotIn("body", channel.result) # Ensure the correct CORS headers have been added @@ -262,7 +262,7 @@ class OptionsResourceTests(unittest.TestCase): def 
test_known_options_request(self) -> None: """An OPTIONS requests to an known URL still returns 204 No Content.""" channel = self._make_request(b"OPTIONS", b"/res/") - self.assertEqual(channel.result["code"], b"204") + self.assertEqual(channel.code, 204) self.assertNotIn("body", channel.result) # Ensure the correct CORS headers have been added @@ -282,12 +282,12 @@ class OptionsResourceTests(unittest.TestCase): def test_unknown_request(self) -> None: """A non-OPTIONS request to an unknown URL should 404.""" channel = self._make_request(b"GET", b"/foo/") - self.assertEqual(channel.result["code"], b"404") + self.assertEqual(channel.code, 404) def test_known_request(self) -> None: """A non-OPTIONS request to an known URL should query the proper resource.""" channel = self._make_request(b"GET", b"/res/") - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) self.assertEqual(channel.result["body"], b"/res/") @@ -314,7 +314,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/path" ) - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) body = channel.result["body"] self.assertEqual(body, b"response") @@ -334,7 +334,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/path" ) - self.assertEqual(channel.result["code"], b"301") + self.assertEqual(channel.code, 301) headers = channel.result["headers"] location_headers = [v for k, v in headers if k == b"Location"] self.assertEqual(location_headers, [b"/look/an/eagle"]) @@ -357,7 +357,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"GET", b"/path" ) - self.assertEqual(channel.result["code"], b"304") + self.assertEqual(channel.code, 304) headers = channel.result["headers"] location_headers = [v for k, v in headers if k == b"Location"] self.assertEqual(location_headers, [b"/no/over/there"]) @@ -378,7 +378,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): self.reactor, FakeSite(res, self.reactor), b"HEAD", b"/path" ) - self.assertEqual(channel.result["code"], b"200") + self.assertEqual(channel.code, 200) self.assertNotIn("body", channel.result) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index d3c13cf14c..abd7459a8c 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -53,7 +53,7 @@ class TermsTestCase(unittest.HomeserverTestCase): request_data = {"username": "kermit", "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, channel.result) self.assertTrue(channel.json_body is not None) self.assertIsInstance(channel.json_body["session"], str) @@ -96,7 +96,7 @@ class TermsTestCase(unittest.HomeserverTestCase): # We don't bother checking that the response is correct - we'll leave that to # other tests. We just want to make sure we're on the right path. - self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.code, 401, channel.result) # Finish the UI auth for terms request_data = { @@ -112,7 +112,7 @@ class TermsTestCase(unittest.HomeserverTestCase): # We're interested in getting a response that looks like a successful # registration, not so much that the details are exactly what we want. 
- self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, 200, channel.result) self.assertTrue(channel.json_body is not None) self.assertIsInstance(channel.json_body["user_id"], str) -- cgit 1.4.1 From ab18441573dc14cea1fe4082b2a89b9d392a4b9f Mon Sep 17 00:00:00 2001 From: Šimon Brandner Date: Fri, 5 Aug 2022 17:09:33 +0200 Subject: Support stable identifiers for MSC2285: private read receipts. (#13273) This adds support for the stable identifiers of MSC2285 while continuing to support the unstable identifiers behind the configuration flag. These will be removed in a future version. --- changelog.d/13273.feature | 1 + synapse/api/constants.py | 3 +- synapse/config/experimental.py | 2 +- synapse/handlers/initial_sync.py | 11 +-- synapse/handlers/receipts.py | 36 ++++++--- synapse/replication/tcp/client.py | 5 +- synapse/rest/client/notifications.py | 7 +- synapse/rest/client/read_marker.py | 8 +- synapse/rest/client/receipts.py | 10 ++- synapse/rest/client/versions.py | 1 + .../storage/databases/main/event_push_actions.py | 85 ++++++++++++++++++---- tests/handlers/test_receipts.py | 58 +++++++++++---- tests/rest/client/test_sync.py | 58 ++++++++++----- tests/storage/test_receipts.py | 55 +++++++++----- 14 files changed, 246 insertions(+), 94 deletions(-) create mode 100644 changelog.d/13273.feature diff --git a/changelog.d/13273.feature b/changelog.d/13273.feature new file mode 100644 index 0000000000..53110d74e9 --- /dev/null +++ b/changelog.d/13273.feature @@ -0,0 +1 @@ +Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 789859e69e..1d46fb0e43 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -257,7 +257,8 @@ class GuestAccess: class ReceiptTypes: READ: Final = "m.read" - READ_PRIVATE: Final = "org.matrix.msc2285.read.private" + READ_PRIVATE: Final = "m.read.private" + UNSTABLE_READ_PRIVATE: Final = "org.matrix.msc2285.read.private" FULLY_READ: Final = "m.fully_read" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index c2ecd977cd..7d17c958bb 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -32,7 +32,7 @@ class ExperimentalConfig(Config): # MSC2716 (importing historical messages) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) - # MSC2285 (private read receipts) + # MSC2285 (unstable private read receipts) self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False) # MSC3244 (room version capabilities) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 85b472f250..6484e47e5f 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -143,8 +143,8 @@ class InitialSyncHandler: joined_rooms, to_key=int(now_token.receipt_key), ) - if self.hs.config.experimental.msc2285_enabled: - receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id) + + receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id) tags_by_room = await self.store.get_tags_for_user(user_id) @@ -456,11 +456,8 @@ class InitialSyncHandler: ) if not receipts: return [] - if self.hs.config.experimental.msc2285_enabled: - receipts = ReceiptEventSource.filter_out_private_receipts( - receipts, user_id - ) - return receipts + + return ReceiptEventSource.filter_out_private_receipts(receipts, user_id) presence, receipts, 
(messages, token) = await make_deferred_yieldable( gather_results( diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 43d2882b0a..d4a866b346 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -163,7 +163,10 @@ class ReceiptsHandler: if not is_new: return - if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE: + if self.federation_sender and receipt_type not in ( + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ): await self.federation_sender.send_read_receipt(receipt) @@ -203,24 +206,38 @@ class ReceiptEventSource(EventSource[int, JsonDict]): for event_id, orig_event_content in room.get("content", {}).items(): event_content = orig_event_content # If there are private read receipts, additional logic is necessary. - if ReceiptTypes.READ_PRIVATE in event_content: + if ( + ReceiptTypes.READ_PRIVATE in event_content + or ReceiptTypes.UNSTABLE_READ_PRIVATE in event_content + ): # Make a copy without private read receipts to avoid leaking # other user's private read receipts.. event_content = { receipt_type: receipt_value for receipt_type, receipt_value in event_content.items() - if receipt_type != ReceiptTypes.READ_PRIVATE + if receipt_type + not in ( + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ) } # Copy the current user's private read receipt from the # original content, if it exists. - user_private_read_receipt = orig_event_content[ - ReceiptTypes.READ_PRIVATE - ].get(user_id, None) + user_private_read_receipt = orig_event_content.get( + ReceiptTypes.READ_PRIVATE, {} + ).get(user_id, None) if user_private_read_receipt: event_content[ReceiptTypes.READ_PRIVATE] = { user_id: user_private_read_receipt } + user_unstable_private_read_receipt = orig_event_content.get( + ReceiptTypes.UNSTABLE_READ_PRIVATE, {} + ).get(user_id, None) + if user_unstable_private_read_receipt: + event_content[ReceiptTypes.UNSTABLE_READ_PRIVATE] = { + user_id: user_unstable_private_read_receipt + } # Include the event if there is at least one non-private read # receipt or the current user has a private read receipt. @@ -256,10 +273,9 @@ class ReceiptEventSource(EventSource[int, JsonDict]): room_ids, from_key=from_key, to_key=to_key ) - if self.config.experimental.msc2285_enabled: - events = ReceiptEventSource.filter_out_private_receipts( - events, user.to_string() - ) + events = ReceiptEventSource.filter_out_private_receipts( + events, user.to_string() + ) return events, to_key diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e4f2201c92..1ed7230e32 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -416,7 +416,10 @@ class FederationSenderHandler: if not self._is_mine_id(receipt.user_id): continue # Private read receipts never get sent over federation. 
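The check that follows now has to exclude both spellings of a private read receipt from federation traffic. A minimal sketch of the rule, using the two identifiers defined in the ReceiptTypes hunk above (the helper name here is hypothetical):

# Stable and unstable identifiers for MSC2285 private read receipts.
PRIVATE_RECEIPT_TYPES = frozenset(
    {"m.read.private", "org.matrix.msc2285.read.private"}
)

def should_send_over_federation(receipt_type: str) -> bool:
    # Private read receipts, under either prefix, never leave the local homeserver.
    return receipt_type not in PRIVATE_RECEIPT_TYPES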
- if receipt.receipt_type == ReceiptTypes.READ_PRIVATE: + if receipt.receipt_type in ( + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ): continue receipt_info = ReadReceipt( receipt.room_id, diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index 24bc7c9095..a73322a6a4 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -58,7 +58,12 @@ class NotificationsServlet(RestServlet): ) receipts_by_room = await self.store.get_receipts_for_user_with_orderings( - user_id, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + user_id, + [ + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ], ) notif_event_ids = [pa.event_id for pa in push_actions] diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 8896f2df50..aaad8b233f 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -40,9 +40,13 @@ class ReadMarkerRestServlet(RestServlet): self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() - self._known_receipt_types = {ReceiptTypes.READ, ReceiptTypes.FULLY_READ} + self._known_receipt_types = { + ReceiptTypes.READ, + ReceiptTypes.FULLY_READ, + ReceiptTypes.READ_PRIVATE, + } if hs.config.experimental.msc2285_enabled: - self._known_receipt_types.add(ReceiptTypes.READ_PRIVATE) + self._known_receipt_types.add(ReceiptTypes.UNSTABLE_READ_PRIVATE) async def on_POST( self, request: SynapseRequest, room_id: str diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 409bfd43c1..c6108fc5eb 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -44,11 +44,13 @@ class ReceiptRestServlet(RestServlet): self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() - self._known_receipt_types = {ReceiptTypes.READ} + self._known_receipt_types = { + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.FULLY_READ, + } if hs.config.experimental.msc2285_enabled: - self._known_receipt_types.update( - (ReceiptTypes.READ_PRIVATE, ReceiptTypes.FULLY_READ) - ) + self._known_receipt_types.add(ReceiptTypes.UNSTABLE_READ_PRIVATE) async def on_POST( self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 0366986755..c9a830cbac 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -94,6 +94,7 @@ class VersionsRestServlet(RestServlet): # Supports the busy presence state described in MSC3026. 
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, # Supports receiving private read receipts as per MSC2285 + "org.matrix.msc2285.stable": True, # TODO: Remove when MSC2285 becomes a part of the spec "org.matrix.msc2285": self.config.experimental.msc2285_enabled, # Supports filtering of /publicRooms by room type as per MSC3827 "org.matrix.msc3827.stable": True, diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 5db70f9a60..161aad0f89 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -80,7 +80,7 @@ import attr from synapse.api.constants import ReceiptTypes from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, @@ -259,7 +259,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas txn, user_id, room_id, - receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), + receipt_types=( + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ), ) stream_ordering = None @@ -448,6 +452,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas The list will be ordered by ascending stream_ordering. The list will have between 0~limit entries. """ + # find rooms that have a read receipt in them and return the next # push actions def get_after_receipt( @@ -455,7 +460,18 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ) -> List[Tuple[str, str, int, str, bool]]: # find rooms that have a read receipt in them and return the next # push actions - sql = """ + + receipt_types_clause, args = make_in_list_sql_clause( + self.database_engine, + "receipt_type", + ( + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ), + ) + + sql = f""" SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight FROM ( @@ -463,10 +479,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas MAX(stream_ordering) as stream_ordering FROM events INNER JOIN receipts_linearized USING (room_id, event_id) - WHERE receipt_type = 'm.read' AND user_id = ? + WHERE {receipt_types_clause} AND user_id = ? GROUP BY room_id ) AS rl, - event_push_actions AS ep + event_push_actions AS ep WHERE ep.room_id = rl.room_id AND ep.stream_ordering > rl.stream_ordering @@ -476,7 +492,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND ep.notif = 1 ORDER BY ep.stream_ordering ASC LIMIT ? 
""" - args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] + args.extend( + (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) + ) txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) @@ -490,7 +508,17 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_no_receipt( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool]]: - sql = """ + receipt_types_clause, args = make_in_list_sql_clause( + self.database_engine, + "receipt_type", + ( + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ), + ) + + sql = f""" SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight FROM event_push_actions AS ep @@ -498,7 +526,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas WHERE ep.room_id NOT IN ( SELECT room_id FROM receipts_linearized - WHERE receipt_type = 'm.read' AND user_id = ? + WHERE {receipt_types_clause} AND user_id = ? GROUP BY room_id ) AND ep.user_id = ? @@ -507,7 +535,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND ep.notif = 1 ORDER BY ep.stream_ordering ASC LIMIT ? """ - args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] + args.extend( + (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) + ) txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) @@ -557,12 +587,23 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas The list will be ordered by descending received_ts. The list will have between 0~limit entries. """ + # find rooms that have a read receipt in them and return the most recent # push actions def get_after_receipt( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool, int]]: - sql = """ + receipt_types_clause, args = make_in_list_sql_clause( + self.database_engine, + "receipt_type", + ( + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ), + ) + + sql = f""" SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight, e.received_ts FROM ( @@ -570,7 +611,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas MAX(stream_ordering) as stream_ordering FROM events INNER JOIN receipts_linearized USING (room_id, event_id) - WHERE receipt_type = 'm.read' AND user_id = ? + WHERE {receipt_types_clause} AND user_id = ? GROUP BY room_id ) AS rl, event_push_actions AS ep @@ -584,7 +625,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND ep.notif = 1 ORDER BY ep.stream_ordering DESC LIMIT ? 
""" - args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] + args.extend( + (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) + ) txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) @@ -598,7 +641,17 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas def get_no_receipt( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool, int]]: - sql = """ + receipt_types_clause, args = make_in_list_sql_clause( + self.database_engine, + "receipt_type", + ( + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ), + ) + + sql = f""" SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight, e.received_ts FROM event_push_actions AS ep @@ -606,7 +659,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas WHERE ep.room_id NOT IN ( SELECT room_id FROM receipts_linearized - WHERE receipt_type = 'm.read' AND user_id = ? + WHERE {receipt_types_clause} AND user_id = ? GROUP BY room_id ) AND ep.user_id = ? @@ -615,7 +668,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND ep.notif = 1 ORDER BY ep.stream_ordering DESC LIMIT ? """ - args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] + args.extend( + (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) + ) txn.execute(sql, args) return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index a95868b5c0..5f70a2db79 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -15,6 +15,8 @@ from copy import deepcopy from typing import List +from parameterized import parameterized + from synapse.api.constants import EduTypes, ReceiptTypes from synapse.types import JsonDict @@ -25,13 +27,16 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.event_source = hs.get_event_sources().sources.receipt - def test_filters_out_private_receipt(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_filters_out_private_receipt(self, receipt_type: str) -> None: self._test_filters_private( [ { "content": { "$1435641916114394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@rikj:jki.re": { "ts": 1436451550453, } @@ -45,13 +50,18 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [], ) - def test_filters_out_private_receipt_and_ignores_rest(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_filters_out_private_receipt_and_ignores_rest( + self, receipt_type: str + ) -> None: self._test_filters_private( [ { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@rikj:jki.re": { "ts": 1436451550453, }, @@ -84,13 +94,18 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_filters_out_event_with_only_private_receipts_and_ignores_the_rest(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_filters_out_event_with_only_private_receipts_and_ignores_the_rest( + self, receipt_type: str + ) -> None: self._test_filters_private( [ { "content": { "$14356419edgd14394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@rikj:jki.re": { "ts": 1436451550453, }, @@ 
-125,7 +140,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_handles_empty_event(self): + def test_handles_empty_event(self) -> None: self._test_filters_private( [ { @@ -160,13 +175,18 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_filters_out_receipt_event_with_only_private_receipt_and_ignores_rest(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_filters_out_receipt_event_with_only_private_receipt_and_ignores_rest( + self, receipt_type: str + ) -> None: self._test_filters_private( [ { "content": { "$14356419edgd14394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@rikj:jki.re": { "ts": 1436451550453, }, @@ -207,7 +227,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_handles_string_data(self): + def test_handles_string_data(self) -> None: """ Tests that an invalid shape for read-receipts is handled. Context: https://github.com/matrix-org/synapse/issues/10603 @@ -242,13 +262,16 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_leaves_our_private_and_their_public(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_leaves_our_private_and_their_public(self, receipt_type: str) -> None: self._test_filters_private( [ { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@me:server.org": { "ts": 1436451550453, }, @@ -273,7 +296,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@me:server.org": { "ts": 1436451550453, }, @@ -296,13 +319,16 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_we_do_not_mutate(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_we_do_not_mutate(self, receipt_type: str) -> None: """Ensure the input values are not modified.""" events = [ { "content": { "$1435641916114394fHBLK:matrix.org": { - ReceiptTypes.READ_PRIVATE: { + receipt_type: { "@rikj:jki.re": { "ts": 1436451550453, } @@ -320,7 +346,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): def _test_filters_private( self, events: List[JsonDict], expected_output: List[JsonDict] - ): + ) -> None: """Tests that the _filter_out_private returns the expected output""" filtered_events = self.event_source.filter_out_private_receipts( events, "@me:server.org" diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index ae16184828..de0dec8539 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -38,7 +38,6 @@ from tests.federation.transport.test_knocking import ( KnockingStrippedStateEventHelperMixin, ) from tests.server import TimedOutException -from tests.unittest import override_config class FilterTestCase(unittest.HomeserverTestCase): @@ -390,6 +389,12 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + config = self.default_config() + config["experimental_features"] = {"msc2285_enabled": True} + + return self.setup_test_homeserver(config=config) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.url = "/sync?since=%s" self.next_batch = "s0" @@ -408,15 +413,17 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Join 
the second user self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) - @override_config({"experimental_features": {"msc2285_enabled": True}}) - def test_private_read_receipts(self) -> None: + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_private_read_receipts(self, receipt_type: str) -> None: # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) # Send a private read receipt to tell the server the first user's message was read channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", {}, access_token=self.tok2, ) @@ -425,8 +432,10 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Test that the first user can't see the other user's private read receipt self.assertIsNone(self._get_read_receipt()) - @override_config({"experimental_features": {"msc2285_enabled": True}}) - def test_public_receipt_can_override_private(self) -> None: + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_public_receipt_can_override_private(self, receipt_type: str) -> None: """ Sending a public read receipt to the same event which has a private read receipt should cause that receipt to become public. @@ -437,7 +446,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Send a private read receipt channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", {}, access_token=self.tok2, ) @@ -456,8 +465,10 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Test that we did override the private read receipt self.assertNotEqual(self._get_read_receipt(), None) - @override_config({"experimental_features": {"msc2285_enabled": True}}) - def test_private_receipt_cannot_override_public(self) -> None: + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_private_receipt_cannot_override_public(self, receipt_type: str) -> None: """ Sending a private read receipt to the same event which has a public read receipt should cause no change. @@ -478,7 +489,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Send a private read receipt channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", {}, access_token=self.tok2, ) @@ -590,7 +601,10 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): tok=self.tok, ) - def test_unread_counts(self) -> None: + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_unread_counts(self, receipt_type: str) -> None: """Tests that /sync returns the right value for the unread count (MSC2654).""" # Check that our own messages don't increase the unread count. @@ -624,7 +638,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): # Send a read receipt to tell the server we've read the latest event. 
channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", {}, access_token=self.tok, ) @@ -700,7 +714,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self._check_unread_count(5) res2 = self.helper.send(self.room_id, "hello", tok=self.tok2) - # Make sure both m.read and org.matrix.msc2285.read.private advance + # Make sure both m.read and m.read.private advance channel = self.make_request( "POST", f"/rooms/{self.room_id}/receipt/m.read/{res1['event_id']}", @@ -712,16 +726,22 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res2['event_id']}", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res2['event_id']}", {}, access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.json_body) self._check_unread_count(0) - # We test for both receipt types that influence notification counts - @parameterized.expand([ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]) - def test_read_receipts_only_go_down(self, receipt_type: ReceiptTypes) -> None: + # We test for all three receipt types that influence notification counts + @parameterized.expand( + [ + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ] + ) + def test_read_receipts_only_go_down(self, receipt_type: str) -> None: # Join the new user self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) @@ -739,11 +759,11 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, channel.json_body) self._check_unread_count(0) - # Make sure neither m.read nor org.matrix.msc2285.read.private make the + # Make sure neither m.read nor m.read.private make the # read receipt go up to an older event channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res1['event_id']}", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res1['event_id']}", {}, access_token=self.tok, ) diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py index b1a8f8bba7..191c957fb5 100644 --- a/tests/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
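Like the handler and sync tests earlier in this commit, the storage tests below are converted to parameterized.expand so that each case runs once per private receipt identifier. A stripped-down illustration of the pattern (the test name and body are hypothetical, not from this patch):

import unittest

from parameterized import parameterized


class ReceiptTypeExampleTest(unittest.TestCase):
    @parameterized.expand(["m.read.private", "org.matrix.msc2285.read.private"])
    def test_is_private(self, receipt_type: str) -> None:
        # expand() generates one test method per list entry and passes the
        # entry in as the extra positional argument.
        self.assertTrue(receipt_type.endswith("read.private"))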
+from parameterized import parameterized + from synapse.api.constants import ReceiptTypes from synapse.types import UserID, create_requester @@ -23,7 +25,7 @@ OUR_USER_ID = "@our:test" class ReceiptTestCase(HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor, clock, homeserver) -> None: super().prepare(reactor, clock, homeserver) self.store = homeserver.get_datastores().main @@ -83,10 +85,15 @@ class ReceiptTestCase(HomeserverTestCase): ) ) - def test_return_empty_with_no_data(self): + def test_return_empty_with_no_data(self) -> None: res = self.get_success( self.store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + OUR_USER_ID, + [ + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ], ) ) self.assertEqual(res, {}) @@ -94,7 +101,11 @@ class ReceiptTestCase(HomeserverTestCase): res = self.get_success( self.store.get_receipts_for_user_with_orderings( OUR_USER_ID, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + [ + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ], ) ) self.assertEqual(res, {}) @@ -103,12 +114,19 @@ class ReceiptTestCase(HomeserverTestCase): self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + [ + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ], ) ) self.assertEqual(res, None) - def test_get_receipts_for_user(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_get_receipts_for_user(self, receipt_type: str) -> None: # Send some events into the first room event1_1_id = self.create_and_send_event( self.room_id1, UserID.from_string(OTHER_USER_ID) @@ -126,14 +144,14 @@ class ReceiptTestCase(HomeserverTestCase): # Send private read receipt for the second event self.get_success( self.store.insert_receipt( - self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} + self.room_id1, receipt_type, OUR_USER_ID, [event1_2_id], {} ) ) # Test we get the latest event when we want both private and public receipts res = self.get_success( self.store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + OUR_USER_ID, [ReceiptTypes.READ, receipt_type] ) ) self.assertEqual(res, {self.room_id1: event1_2_id}) @@ -146,7 +164,7 @@ class ReceiptTestCase(HomeserverTestCase): # Test we get the latest event when we want only the public receipt res = self.get_success( - self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ_PRIVATE]) + self.store.get_receipts_for_user(OUR_USER_ID, [receipt_type]) ) self.assertEqual(res, {self.room_id1: event1_2_id}) @@ -169,17 +187,20 @@ class ReceiptTestCase(HomeserverTestCase): # Test new room is reflected in what the method returns self.get_success( self.store.insert_receipt( - self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} + self.room_id2, receipt_type, OUR_USER_ID, [event2_1_id], {} ) ) res = self.get_success( self.store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + OUR_USER_ID, [ReceiptTypes.READ, receipt_type] ) ) self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id}) - def test_get_last_receipt_event_id_for_user(self): + @parameterized.expand( + [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] + ) + def test_get_last_receipt_event_id_for_user(self, receipt_type: 
str) -> None: # Send some events into the first room event1_1_id = self.create_and_send_event( self.room_id1, UserID.from_string(OTHER_USER_ID) @@ -197,7 +218,7 @@ class ReceiptTestCase(HomeserverTestCase): # Send private read receipt for the second event self.get_success( self.store.insert_receipt( - self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} + self.room_id1, receipt_type, OUR_USER_ID, [event1_2_id], {} ) ) @@ -206,7 +227,7 @@ class ReceiptTestCase(HomeserverTestCase): self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + [ReceiptTypes.READ, receipt_type], ) ) self.assertEqual(res, event1_2_id) @@ -222,7 +243,7 @@ class ReceiptTestCase(HomeserverTestCase): # Test we get the latest event when we want only the private receipt res = self.get_success( self.store.get_last_receipt_event_id_for_user( - OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE] + OUR_USER_ID, self.room_id1, [receipt_type] ) ) self.assertEqual(res, event1_2_id) @@ -248,14 +269,14 @@ class ReceiptTestCase(HomeserverTestCase): # Test new room is reflected in what the method returns self.get_success( self.store.insert_receipt( - self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} + self.room_id2, receipt_type, OUR_USER_ID, [event2_1_id], {} ) ) res = self.get_success( self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id2, - [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + [ReceiptTypes.READ, receipt_type], ) ) self.assertEqual(res, event2_1_id) -- cgit 1.4.1 From 7a199951202f53cef398507439bde306e4833219 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 8 Aug 2022 16:59:56 +0100 Subject: Correct a misnamed argument in state res v2 (#13467) In state res v2, we apply two passes of iterative auth checks. The first pass replays power events and events in their auth chains, but only those belonging to the full conflicted set. The source code as written suggests that we want only those belonging to the auth difference (which is a smaller set of events). At runtime we were doing the correct thing anyway, because the only callsite of `_reverse_topological_power_sort` passes in the `full_conflicted_set`. So this really is just a rename. --- changelog.d/13467.misc | 1 + synapse/state/v2.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13467.misc diff --git a/changelog.d/13467.misc b/changelog.d/13467.misc new file mode 100644 index 0000000000..b8b7d65c16 --- /dev/null +++ b/changelog.d/13467.misc @@ -0,0 +1 @@ +Correct a misnamed argument in state res v2 internals. diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 7db032203b..cf3045f82e 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -434,7 +434,7 @@ async def _add_event_and_auth_chain_to_graph( event_id: str, event_map: Dict[str, EventBase], state_res_store: StateResolutionStore, - auth_diff: Set[str], + full_conflicted_set: Set[str], ) -> None: """Helper function for _reverse_topological_power_sort that add the event and its auth chain (that is in the auth diff) to the graph @@ -445,7 +445,7 @@ async def _add_event_and_auth_chain_to_graph( event_id: Event to add to the graph event_map state_res_store - auth_diff: Set of event IDs that are in the auth difference. + full_conflicted_set: Set of event IDs that are in the full conflicted set. 
""" state = [event_id] @@ -455,7 +455,7 @@ async def _add_event_and_auth_chain_to_graph( event = await _get_event(room_id, eid, event_map, state_res_store) for aid in event.auth_event_ids(): - if aid in auth_diff: + if aid in full_conflicted_set: if aid not in graph: state.append(aid) @@ -468,7 +468,7 @@ async def _reverse_topological_power_sort( event_ids: Iterable[str], event_map: Dict[str, EventBase], state_res_store: StateResolutionStore, - auth_diff: Set[str], + full_conflicted_set: Set[str], ) -> List[str]: """Returns a list of the event_ids sorted by reverse topological ordering, and then by power level and origin_server_ts @@ -479,7 +479,7 @@ async def _reverse_topological_power_sort( event_ids: The events to sort event_map state_res_store - auth_diff: Set of event IDs that are in the auth difference. + full_conflicted_set: Set of event IDs that are in the full conflicted set. Returns: The sorted list @@ -488,7 +488,7 @@ async def _reverse_topological_power_sort( graph: Dict[str, Set[str]] = {} for idx, event_id in enumerate(event_ids, start=1): await _add_event_and_auth_chain_to_graph( - graph, room_id, event_id, event_map, state_res_store, auth_diff + graph, room_id, event_id, event_map, state_res_store, full_conflicted_set ) # We await occasionally when we're working with large data sets to -- cgit 1.4.1 From c97042f7eef3748e17c90e48a4122389a89c4735 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 8 Aug 2022 22:21:27 +0200 Subject: Use literals in place of `HTTPStatus` constants in tests (#13469) --- changelog.d/13469.misc | 1 + tests/rest/admin/test_admin.py | 24 +-- tests/rest/admin/test_background_updates.py | 18 +- tests/rest/admin/test_device.py | 36 ++-- tests/rest/admin/test_event_reports.py | 34 ++-- tests/rest/admin/test_federation.py | 46 +++--- tests/rest/admin/test_media.py | 51 +++--- tests/rest/admin/test_registration_tokens.py | 32 ++-- tests/rest/admin/test_room.py | 124 +++++++------- tests/rest/admin/test_server_notice.py | 14 +- tests/rest/admin/test_statistics.py | 36 ++-- tests/rest/admin/test_user.py | 238 +++++++++++++-------------- tests/rest/admin/test_username_available.py | 6 +- 13 files changed, 329 insertions(+), 331 deletions(-) create mode 100644 changelog.d/13469.misc diff --git a/changelog.d/13469.misc b/changelog.d/13469.misc new file mode 100644 index 0000000000..315930deab --- /dev/null +++ b/changelog.d/13469.misc @@ -0,0 +1 @@ +Use literals in place of `HTTPStatus` constants in tests. 
\ No newline at end of file diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 82ac5991e6..06e74d5e58 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -42,7 +42,7 @@ class VersionTestCase(unittest.HomeserverTestCase): def test_version_string(self) -> None: channel = self.make_request("GET", self.url, shorthand=False) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual( {"server_version", "python_version"}, set(channel.json_body.keys()) ) @@ -139,7 +139,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): ) # Should be successful - self.assertEqual(HTTPStatus.OK, channel.code) + self.assertEqual(200, channel.code) # Quarantine the media url = "/_synapse/admin/v1/media/quarantine/%s/%s" % ( @@ -152,7 +152,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): access_token=admin_user_tok, ) self.pump(1.0) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Attempt to access the media self._ensure_quarantined(admin_user_tok, server_name_and_media_id) @@ -209,7 +209,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): access_token=admin_user_tok, ) self.pump(1.0) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body, {"num_quarantined": 2}, "Expected 2 quarantined items" ) @@ -251,7 +251,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): access_token=admin_user_tok, ) self.pump(1.0) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body, {"num_quarantined": 2}, "Expected 2 quarantined items" ) @@ -285,7 +285,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v1/media/protect/%s" % (urllib.parse.quote(media_id_2),) channel = self.make_request("POST", url, access_token=admin_user_tok) self.pump(1.0) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Quarantine all media by this user url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote( @@ -297,7 +297,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): access_token=admin_user_tok, ) self.pump(1.0) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body, {"num_quarantined": 1}, "Expected 1 quarantined item" ) @@ -318,10 +318,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): # Shouldn't be quarantined self.assertEqual( - HTTPStatus.OK, + 200, channel.code, msg=( - "Expected to receive a HTTPStatus.OK on accessing not-quarantined media: %s" + "Expected to receive a 200 on accessing not-quarantined media: %s" % server_and_media_id_2 ), ) @@ -350,7 +350,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase): def test_purge_history(self) -> None: """ Simple test of purge history API. - Test only that is is possible to call, get status HTTPStatus.OK and purge_id. + Test only that is is possible to call, get status 200 and purge_id. 
""" channel = self.make_request( @@ -360,7 +360,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertIn("purge_id", channel.json_body) purge_id = channel.json_body["purge_id"] @@ -371,5 +371,5 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("complete", channel.json_body["status"]) diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index 6cf56b1e35..8295ecf248 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -125,7 +125,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/background_updates/status", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Background updates should be enabled, but none should be running. self.assertDictEqual( @@ -147,7 +147,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/background_updates/status", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Background updates should be enabled, and one should be running. self.assertDictEqual( @@ -181,7 +181,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/background_updates/enabled", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertDictEqual(channel.json_body, {"enabled": True}) # Disable the BG updates @@ -191,7 +191,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): content={"enabled": False}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertDictEqual(channel.json_body, {"enabled": False}) # Advance a bit and get the current status, note this will finish the in @@ -204,7 +204,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/background_updates/status", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertDictEqual( channel.json_body, { @@ -231,7 +231,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/background_updates/status", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # There should be no change from the previous /status response. 
self.assertDictEqual( @@ -259,7 +259,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): content={"enabled": True}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertDictEqual(channel.json_body, {"enabled": True}) @@ -270,7 +270,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "/_synapse/admin/v1/background_updates/status", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Background updates should be enabled and making progress. self.assertDictEqual( @@ -325,7 +325,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # test that each background update is waiting now for update in updates: diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index f7080bda87..779f1bfac1 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -122,7 +122,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): def test_unknown_device(self) -> None: """ - Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or HTTPStatus.OK. + Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or 200. """ url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user @@ -143,7 +143,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( "DELETE", @@ -151,8 +151,8 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - # Delete unknown device returns status HTTPStatus.OK - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + # Delete unknown device returns status 200 + self.assertEqual(200, channel.code, msg=channel.json_body) def test_update_device_too_long_display_name(self) -> None: """ @@ -189,12 +189,12 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new display", channel.json_body["display_name"]) def test_update_no_display_name(self) -> None: """ - Tests that a update for a device without JSON returns a HTTPStatus.OK + Tests that a update for a device without JSON returns a 200 """ # Set iniital display name. update = {"display_name": "new display"} @@ -210,7 +210,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Ensure the display name was not updated. 
channel = self.make_request( @@ -219,7 +219,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new display", channel.json_body["display_name"]) def test_update_display_name(self) -> None: @@ -234,7 +234,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): content={"display_name": "new displayname"}, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Check new display_name channel = self.make_request( @@ -243,7 +243,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new displayname", channel.json_body["display_name"]) def test_get_device(self) -> None: @@ -256,7 +256,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(self.other_user, channel.json_body["user_id"]) # Check that all fields are available self.assertIn("user_id", channel.json_body) @@ -281,7 +281,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Ensure that the number of devices is decreased res = self.get_success(self.handler.get_devices_by_user(self.other_user)) @@ -379,7 +379,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["devices"])) @@ -399,7 +399,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_devices, channel.json_body["total"]) self.assertEqual(number_devices, len(channel.json_body["devices"])) self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"]) @@ -494,7 +494,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): def test_unknown_devices(self) -> None: """ - Tests that a remove of a device that does not exist returns HTTPStatus.OK. + Tests that a remove of a device that does not exist returns 200. 
""" channel = self.make_request( "POST", @@ -503,8 +503,8 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): content={"devices": ["unknown_device1", "unknown_device2"]}, ) - # Delete unknown devices returns status HTTPStatus.OK - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + # Delete unknown devices returns status 200 + self.assertEqual(200, channel.code, msg=channel.json_body) def test_delete_devices(self) -> None: """ @@ -533,7 +533,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): content={"devices": device_ids}, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) res = self.get_success(self.handler.get_devices_by_user(self.other_user)) self.assertEqual(0, len(res)) diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 4f89f8b534..9bc6ce62cb 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -117,7 +117,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) self.assertNotIn("next_token", channel.json_body) @@ -134,7 +134,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 5) self.assertEqual(channel.json_body["next_token"], 5) @@ -151,7 +151,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 15) self.assertNotIn("next_token", channel.json_body) @@ -168,7 +168,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(channel.json_body["next_token"], 15) self.assertEqual(len(channel.json_body["event_reports"]), 10) @@ -185,7 +185,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 10) self.assertEqual(len(channel.json_body["event_reports"]), 10) self.assertNotIn("next_token", channel.json_body) @@ -205,7 +205,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 10) self.assertEqual(len(channel.json_body["event_reports"]), 10) self.assertNotIn("next_token", channel.json_body) @@ -225,7 +225,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): 
access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 5) self.assertEqual(len(channel.json_body["event_reports"]), 5) self.assertNotIn("next_token", channel.json_body) @@ -247,7 +247,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) report = 1 @@ -265,7 +265,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) report = 1 @@ -344,7 +344,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) self.assertNotIn("next_token", channel.json_body) @@ -357,7 +357,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) self.assertNotIn("next_token", channel.json_body) @@ -370,7 +370,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 19) self.assertEqual(channel.json_body["next_token"], 19) @@ -384,7 +384,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 1) self.assertNotIn("next_token", channel.json_body) @@ -400,7 +400,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): {"score": -100, "reason": "this makes me sad"}, access_token=user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) def _create_event_and_report_without_parameters( self, room_id: str, user_tok: str @@ -415,7 +415,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): {}, access_token=user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) def _check_fields(self, content: List[JsonDict]) -> None: """Checks that all attributes are present in an event report""" @@ -502,7 +502,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - 
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self._check_fields(channel.json_body) def test_invalid_report_id(self) -> None: @@ -594,7 +594,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): {"score": -100, "reason": "this makes me sad"}, access_token=user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) def _check_fields(self, content: JsonDict) -> None: """Checks that all attributes are present in a event report""" diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index 929bbdc37d..c3927c2735 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -142,7 +142,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(len(channel.json_body["destinations"]), 5) self.assertEqual(channel.json_body["next_token"], "5") @@ -160,7 +160,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(len(channel.json_body["destinations"]), 15) self.assertNotIn("next_token", channel.json_body) @@ -178,7 +178,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(channel.json_body["next_token"], "15") self.assertEqual(len(channel.json_body["destinations"]), 10) @@ -198,7 +198,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(len(channel.json_body["destinations"]), number_destinations) self.assertNotIn("next_token", channel.json_body) @@ -211,7 +211,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(len(channel.json_body["destinations"]), number_destinations) self.assertNotIn("next_token", channel.json_body) @@ -224,7 +224,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(len(channel.json_body["destinations"]), 19) self.assertEqual(channel.json_body["next_token"], "19") @@ -238,7 +238,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, 
msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(len(channel.json_body["destinations"]), 1) self.assertNotIn("next_token", channel.json_body) @@ -255,7 +255,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_destinations, len(channel.json_body["destinations"])) self.assertEqual(number_destinations, channel.json_body["total"]) @@ -290,7 +290,7 @@ class FederationTestCase(unittest.HomeserverTestCase): url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], len(expected_destination_list)) returned_order = [ @@ -376,7 +376,7 @@ class FederationTestCase(unittest.HomeserverTestCase): url.encode("ascii"), access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Check that destinations were returned self.assertTrue("destinations" in channel.json_body) @@ -418,7 +418,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("sub0.example.com", channel.json_body["destination"]) # Check that all fields are available @@ -435,7 +435,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("sub0.example.com", channel.json_body["destination"]) self.assertEqual(0, channel.json_body["retry_last_ts"]) self.assertEqual(0, channel.json_body["retry_interval"]) @@ -452,7 +452,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) retry_timings = self.get_success( self.store.get_destination_retry_timings("sub0.example.com") @@ -619,7 +619,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(len(channel.json_body["rooms"]), 3) self.assertEqual(channel.json_body["next_token"], "3") @@ -637,7 +637,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(len(channel.json_body["rooms"]), 5) self.assertNotIn("next_token", channel.json_body) @@ -655,7 +655,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, 
msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(channel.json_body["next_token"], "8") self.assertEqual(len(channel.json_body["rooms"]), 5) @@ -673,7 +673,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel_asc.code, msg=channel_asc.json_body) + self.assertEqual(200, channel_asc.code, msg=channel_asc.json_body) self.assertEqual(channel_asc.json_body["total"], number_rooms) self.assertEqual(number_rooms, len(channel_asc.json_body["rooms"])) self._check_fields(channel_asc.json_body["rooms"]) @@ -685,7 +685,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel_desc.code, msg=channel_desc.json_body) + self.assertEqual(200, channel_desc.code, msg=channel_desc.json_body) self.assertEqual(channel_desc.json_body["total"], number_rooms) self.assertEqual(number_rooms, len(channel_desc.json_body["rooms"])) self._check_fields(channel_desc.json_body["rooms"]) @@ -711,7 +711,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(len(channel.json_body["rooms"]), number_rooms) self.assertNotIn("next_token", channel.json_body) @@ -724,7 +724,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(len(channel.json_body["rooms"]), number_rooms) self.assertNotIn("next_token", channel.json_body) @@ -737,7 +737,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(len(channel.json_body["rooms"]), 4) self.assertEqual(channel.json_body["next_token"], "4") @@ -751,7 +751,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(len(channel.json_body["rooms"]), 1) self.assertNotIn("next_token", channel.json_body) @@ -767,7 +767,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_rooms) self.assertEqual(number_rooms, len(channel.json_body["rooms"])) self._check_fields(channel.json_body["rooms"]) diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index e909e444ac..92fd6c780d 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -131,7 +131,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): upload_resource, SMALL_PNG, tok=self.admin_user_tok, - 
expect_code=HTTPStatus.OK, + expect_code=200, ) # Extract media ID from the response server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' @@ -151,11 +151,10 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): # Should be successful self.assertEqual( - HTTPStatus.OK, + 200, channel.code, msg=( - "Expected to receive a HTTPStatus.OK on accessing media: %s" - % server_and_media_id + "Expected to receive a 200 on accessing media: %s" % server_and_media_id ), ) @@ -172,7 +171,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( media_id, @@ -388,7 +387,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( media_id, @@ -413,7 +412,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self._access_media(server_and_media_id) @@ -425,7 +424,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( server_and_media_id.split("/")[1], @@ -449,7 +448,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&size_gt=67", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self._access_media(server_and_media_id) @@ -460,7 +459,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&size_gt=66", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( server_and_media_id.split("/")[1], @@ -485,7 +484,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): content={"avatar_url": "mxc://%s" % (server_and_media_id,)}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) now_ms = self.clock.time_msec() channel = self.make_request( @@ -493,7 +492,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self._access_media(server_and_media_id) @@ 
-504,7 +503,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( server_and_media_id.split("/")[1], @@ -530,7 +529,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): content={"url": "mxc://%s" % (server_and_media_id,)}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) now_ms = self.clock.time_msec() channel = self.make_request( @@ -538,7 +537,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self._access_media(server_and_media_id) @@ -549,7 +548,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false", access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( server_and_media_id.split("/")[1], @@ -569,7 +568,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): upload_resource, SMALL_PNG, tok=self.admin_user_tok, - expect_code=HTTPStatus.OK, + expect_code=200, ) # Extract media ID from the response server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' @@ -602,10 +601,10 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): if expect_success: self.assertEqual( - HTTPStatus.OK, + 200, channel.code, msg=( - "Expected to receive a HTTPStatus.OK on accessing media: %s" + "Expected to receive a 200 on accessing media: %s" % server_and_media_id ), ) @@ -648,7 +647,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): upload_resource, SMALL_PNG, tok=self.admin_user_tok, - expect_code=HTTPStatus.OK, + expect_code=200, ) # Extract media ID from the response server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' @@ -712,7 +711,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) @@ -726,7 +725,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) @@ -753,7 +752,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) # verify that is not in 
quarantine @@ -785,7 +784,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): upload_resource, SMALL_PNG, tok=self.admin_user_tok, - expect_code=HTTPStatus.OK, + expect_code=200, ) # Extract media ID from the response server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' @@ -845,7 +844,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) @@ -859,7 +858,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py index 8354250ec2..544daaa4c8 100644 --- a/tests/rest/admin/test_registration_tokens.py +++ b/tests/rest/admin/test_registration_tokens.py @@ -105,7 +105,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(len(channel.json_body["token"]), 16) self.assertIsNone(channel.json_body["uses_allowed"]) self.assertIsNone(channel.json_body["expiry_time"]) @@ -129,7 +129,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["token"], token) self.assertEqual(channel.json_body["uses_allowed"], 1) self.assertEqual(channel.json_body["expiry_time"], data["expiry_time"]) @@ -150,7 +150,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(len(channel.json_body["token"]), 16) self.assertIsNone(channel.json_body["uses_allowed"]) self.assertIsNone(channel.json_body["expiry_time"]) @@ -207,7 +207,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): data, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel1.code, msg=channel1.json_body) + self.assertEqual(200, channel1.code, msg=channel1.json_body) channel2 = self.make_request( "POST", @@ -251,7 +251,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": 0}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["uses_allowed"], 0) # Should fail with negative integer @@ -321,7 +321,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"length": 64}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(len(channel.json_body["token"]), 64) # Should fail with 0 @@ 
-439,7 +439,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": 1}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["uses_allowed"], 1) self.assertIsNone(channel.json_body["expiry_time"]) @@ -450,7 +450,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": 0}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["uses_allowed"], 0) self.assertIsNone(channel.json_body["expiry_time"]) @@ -461,7 +461,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": None}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertIsNone(channel.json_body["uses_allowed"]) self.assertIsNone(channel.json_body["expiry_time"]) @@ -506,7 +506,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"expiry_time": new_expiry_time}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["expiry_time"], new_expiry_time) self.assertIsNone(channel.json_body["uses_allowed"]) @@ -517,7 +517,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"expiry_time": None}, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertIsNone(channel.json_body["expiry_time"]) self.assertIsNone(channel.json_body["uses_allowed"]) @@ -568,7 +568,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["uses_allowed"], 1) self.assertEqual(channel.json_body["expiry_time"], new_expiry_time) @@ -655,7 +655,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # GETTING ONE @@ -716,7 +716,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["token"], token) self.assertIsNone(channel.json_body["uses_allowed"]) self.assertIsNone(channel.json_body["expiry_time"]) @@ -762,7 +762,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(len(channel.json_body["registration_tokens"]), 1) token_info = channel.json_body["registration_tokens"][0] self.assertEqual(token_info["token"], token) @@ -816,7 +816,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): 
access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(len(channel.json_body["registration_tokens"]), 2) token_info_1 = channel.json_body["registration_tokens"][0] token_info_2 = channel.json_body["registration_tokens"][1] diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 989cbdb5e2..6ea7858db7 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -94,7 +94,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) def test_room_is_not_valid(self) -> None: """ @@ -127,7 +127,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertIn("new_room_id", channel.json_body) self.assertIn("kicked_users", channel.json_body) self.assertIn("failed_to_kick_users", channel.json_body) @@ -202,7 +202,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(None, channel.json_body["new_room_id"]) self.assertEqual(self.other_user, channel.json_body["kicked_users"][0]) self.assertIn("failed_to_kick_users", channel.json_body) @@ -233,7 +233,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(None, channel.json_body["new_room_id"]) self.assertEqual(self.other_user, channel.json_body["kicked_users"][0]) self.assertIn("failed_to_kick_users", channel.json_body) @@ -265,7 +265,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(None, channel.json_body["new_room_id"]) self.assertEqual(self.other_user, channel.json_body["kicked_users"][0]) self.assertIn("failed_to_kick_users", channel.json_body) @@ -296,7 +296,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): ) # The room is now blocked. 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self._is_blocked(room_id)
 
     def test_shutdown_room_consent(self) -> None:
@@ -337,7 +337,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
         self.assertIn("new_room_id", channel.json_body)
         self.assertIn("failed_to_kick_users", channel.json_body)
@@ -366,7 +366,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
             {"history_visibility": "world_readable"},
             access_token=self.other_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Test that room is not purged
         with self.assertRaises(AssertionError):
@@ -383,7 +383,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
         self.assertIn("new_room_id", channel.json_body)
         self.assertIn("failed_to_kick_users", channel.json_body)
@@ -522,7 +522,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -533,7 +533,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, len(channel.json_body["results"]))
         self.assertEqual("complete", channel.json_body["results"][0]["status"])
         self.assertEqual(delete_id, channel.json_body["results"][0]["delete_id"])
@@ -574,7 +574,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -639,7 +639,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id1 = channel.json_body["delete_id"]
@@ -654,7 +654,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id2 = channel.json_body["delete_id"]
@@ -665,7 +665,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
        )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(2, len(channel.json_body["results"]))
         self.assertEqual("complete", channel.json_body["results"][0]["status"])
         self.assertEqual("complete", channel.json_body["results"][1]["status"])
@@ -682,7 +682,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, len(channel.json_body["results"]))
         self.assertEqual("complete", channel.json_body["results"][0]["status"])
         self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"])
@@ -733,7 +733,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         # get result of first call
         first_channel.await_result()
 
-        self.assertEqual(HTTPStatus.OK, first_channel.code, msg=first_channel.json_body)
+        self.assertEqual(200, first_channel.code, msg=first_channel.json_body)
         self.assertIn("delete_id", first_channel.json_body)
 
         # check status after finish the task
@@ -764,7 +764,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -795,7 +795,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -827,7 +827,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -876,7 +876,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -887,7 +887,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             self.url_status_by_room_id,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, len(channel.json_body["results"]))
 
         # Test that member has moved to new room
@@ -914,7 +914,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             content={"history_visibility": "world_readable"},
             access_token=self.other_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Test that room is not purged
         with self.assertRaises(AssertionError):
@@ -931,7 +931,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("delete_id", channel.json_body)
 
         delete_id = channel.json_body["delete_id"]
@@ -942,7 +942,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             self.url_status_by_room_id,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, len(channel.json_body["results"]))
 
         # Test that member has moved to new room
@@ -1026,9 +1026,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             self.url_status_by_room_id,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(
-            HTTPStatus.OK, channel_room_id.code, msg=channel_room_id.json_body
-        )
+        self.assertEqual(200, channel_room_id.code, msg=channel_room_id.json_body)
         self.assertEqual(1, len(channel_room_id.json_body["results"]))
         self.assertEqual(
             delete_id, channel_room_id.json_body["results"][0]["delete_id"]
@@ -1041,7 +1039,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
         self.assertEqual(
-            HTTPStatus.OK,
+            200,
             channel_delete_id.code,
             msg=channel_delete_id.json_body,
         )
@@ -1100,7 +1098,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
         )
 
         # Check request completed successfully
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Check that response json body contains a "rooms" key
         self.assertTrue(
@@ -1186,7 +1184,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         self.assertTrue("rooms" in channel.json_body)
         for r in channel.json_body["rooms"]:
@@ -1226,7 +1224,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
     def test_correct_room_attributes(self) -> None:
         """Test the correct attributes for a room are returned"""
@@ -1253,7 +1251,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             {"room_id": room_id},
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Set this new alias as the canonical alias for this room
         self.helper.send_state(
@@ -1285,7 +1283,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Check that rooms were returned
         self.assertTrue("rooms" in channel.json_body)
@@ -1341,7 +1339,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Check that rooms were returned
         self.assertTrue("rooms" in channel.json_body)
@@ -1487,7 +1485,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
         def _search_test(
             expected_room_id: Optional[str],
             search_term: str,
-            expected_http_code: int = HTTPStatus.OK,
+            expected_http_code: int = 200,
         ) -> None:
            """Search for a room and check that the returned room's id is a
            match
@@ -1505,7 +1503,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             )
             self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
 
-            if expected_http_code != HTTPStatus.OK:
+            if expected_http_code != 200:
                 return
 
             # Check that rooms were returned
@@ -1585,7 +1583,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         self.assertEqual(room_id, channel.json_body["rooms"][0].get("room_id"))
         self.assertEqual("ж", channel.json_body["rooms"][0].get("name"))
@@ -1618,7 +1616,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         self.assertIn("room_id", channel.json_body)
         self.assertIn("name", channel.json_body)
@@ -1650,7 +1648,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, channel.json_body["joined_local_devices"])
 
         # Have another user join the room
@@ -1664,7 +1662,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(2, channel.json_body["joined_local_devices"])
 
         # leave room
@@ -1676,7 +1674,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(0, channel.json_body["joined_local_devices"])
 
     def test_room_members(self) -> None:
@@ -1707,7 +1705,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         self.assertCountEqual(
             ["@admin:test", "@foo:test", "@bar:test"], channel.json_body["members"]
@@ -1720,7 +1718,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         self.assertCountEqual(
             ["@admin:test", "@bar:test", "@foobar:test"], channel.json_body["members"]
@@ -1738,7 +1736,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertIn("state", channel.json_body)
         # testing that the state events match is painful and not done here. We assume that
         # the create_room already does the right thing, so no need to verify that we got
@@ -1755,7 +1753,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
             {"room_id": room_id},
             access_token=admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Set this new alias as the canonical alias for this room
         self.helper.send_state(
@@ -1924,7 +1922,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(self.public_room_id, channel.json_body["room_id"])
 
         # Validate if user is a member of the room
@@ -1934,7 +1932,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             "/_matrix/client/r0/joined_rooms",
             access_token=self.second_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0])
 
     def test_join_private_room_if_not_member(self) -> None:
@@ -1982,7 +1980,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             "/_matrix/client/r0/joined_rooms",
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
 
         # Join user to room.
@@ -1995,7 +1993,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             content={"user_id": self.second_user_id},
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(private_room_id, channel.json_body["room_id"])
 
         # Validate if user is a member of the room
@@ -2005,7 +2003,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             "/_matrix/client/r0/joined_rooms",
             access_token=self.second_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
 
     def test_join_private_room_if_owner(self) -> None:
@@ -2025,7 +2023,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(private_room_id, channel.json_body["room_id"])
 
         # Validate if user is a member of the room
@@ -2035,7 +2033,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             "/_matrix/client/r0/joined_rooms",
             access_token=self.second_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
 
     def test_context_as_non_admin(self) -> None:
@@ -2099,7 +2097,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
             % (room_id, events[midway]["event_id"]),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(
             channel.json_body["event"]["event_id"], events[midway]["event_id"]
         )
@@ -2158,7 +2156,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Now we test that we can join the room and ban a user.
         self.helper.join(room_id, self.admin_user, tok=self.admin_user_tok)
@@ -2185,7 +2183,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Now we test that we can join the room (we should have received an
         # invite) and can ban a user.
@@ -2211,7 +2209,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Now we test that we can join the room and ban a user.
         self.helper.join(room_id, self.second_user_id, tok=self.second_tok)
@@ -2354,7 +2352,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
             content={"block": True},
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertTrue(channel.json_body["block"])
         self._is_blocked(room_id, expect=True)
 
@@ -2378,7 +2376,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
             content={"block": True},
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertTrue(channel.json_body["block"])
         self._is_blocked(self.room_id, expect=True)
 
@@ -2394,7 +2392,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
             content={"block": False},
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertFalse(channel.json_body["block"])
         self._is_blocked(room_id, expect=False)
 
@@ -2418,7 +2416,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
             content={"block": False},
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertFalse(channel.json_body["block"])
         self._is_blocked(self.room_id, expect=False)
 
@@ -2433,7 +2431,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
             self.url % room_id,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertTrue(channel.json_body["block"])
         self.assertEqual(self.other_user, channel.json_body["user_id"])
 
@@ -2457,7 +2455,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
             self.url % room_id,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertFalse(channel.json_body["block"])
         self.assertNotIn("user_id", channel.json_body)
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index dbcba2663c..bea3ac34d8 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -197,7 +197,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
                 "content": {"msgtype": "m.text", "body": "test msg one"},
             },
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # user has one invite
         invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -226,7 +226,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
                 "content": {"msgtype": "m.text", "body": "test msg two"},
             },
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # user has no new invites or memberships
         self._check_invite_and_join_status(self.other_user, 0, 1)
@@ -260,7 +260,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
                 "content": {"msgtype": "m.text", "body": "test msg one"},
             },
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # user has one invite
         invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -301,7 +301,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
                 "content": {"msgtype": "m.text", "body": "test msg two"},
             },
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # user has one invite
         invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -341,7 +341,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
                 "content": {"msgtype": "m.text", "body": "test msg one"},
             },
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # user has one invite
         invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -388,7 +388,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
                 "content": {"msgtype": "m.text", "body": "test msg two"},
             },
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # user has one invite
         invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -538,7 +538,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
         channel = self.make_request(
             "GET", "/_matrix/client/r0/sync", access_token=token
         )
-        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertEqual(channel.code, 200)
 
         # Get the messages
         room = channel.json_body["rooms"]["join"][room_id]
diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py
index 7cb8ec57ba..baed27a815 100644
--- a/tests/rest/admin/test_statistics.py
+++ b/tests/rest/admin/test_statistics.py
@@ -204,7 +204,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 10)
         self.assertEqual(len(channel.json_body["users"]), 5)
         self.assertEqual(channel.json_body["next_token"], 5)
@@ -222,7 +222,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 20)
         self.assertEqual(len(channel.json_body["users"]), 15)
         self.assertNotIn("next_token", channel.json_body)
@@ -240,7 +240,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 20)
         self.assertEqual(channel.json_body["next_token"], 15)
         self.assertEqual(len(channel.json_body["users"]), 10)
@@ -262,7 +262,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), number_users)
         self.assertNotIn("next_token", channel.json_body)
@@ -275,7 +275,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), number_users)
         self.assertNotIn("next_token", channel.json_body)
@@ -288,7 +288,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), 19)
         self.assertEqual(channel.json_body["next_token"], 19)
@@ -301,7 +301,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), 1)
         self.assertNotIn("next_token", channel.json_body)
@@ -318,7 +318,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["users"]))
 
@@ -415,7 +415,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["users"][0]["media_count"], 3)
 
         # filter media starting at `ts1` after creating first media
@@ -425,7 +425,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url + "?from_ts=%s" % (ts1,),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 0)
 
         self._create_media(self.other_user_tok, 3)
@@ -440,7 +440,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url + "?from_ts=%s&until_ts=%s" % (ts1, ts2),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["users"][0]["media_count"], 3)
 
         # filter media until `ts2` and earlier
@@ -449,7 +449,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url + "?until_ts=%s" % (ts2,),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["users"][0]["media_count"], 6)
 
     def test_search_term(self) -> None:
@@ -461,7 +461,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 20)
 
         # filter user 1 and 10-19 by `user_id`
@@ -470,7 +470,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url + "?search_term=foo_user_1",
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 11)
 
         # filter on this user in `displayname`
@@ -479,7 +479,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url + "?search_term=bar_user_10",
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["users"][0]["displayname"], "bar_user_10")
         self.assertEqual(channel.json_body["total"], 1)
 
@@ -489,7 +489,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             self.url + "?search_term=foobar",
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 0)
 
     def _create_users_with_media(self, number_users: int, media_per_user: int) -> None:
@@ -515,7 +515,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
         for _ in range(number_media):
             # Upload some media into the room
             self.helper.upload_media(
-                upload_resource, SMALL_PNG, tok=user_token, expect_code=HTTPStatus.OK
+                upload_resource, SMALL_PNG, tok=user_token, expect_code=200
             )
 
     def _check_fields(self, content: List[JsonDict]) -> None:
@@ -549,7 +549,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
             url.encode("ascii"),
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], len(expected_user_list))
 
         returned_order = [row["user_id"] for row in channel.json_body["users"]]
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 12db68d564..c2b54b1ef7 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -169,7 +169,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         }
 
         channel = self.make_request("POST", self.url, body)
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["user_id"])
 
     def test_nonce_reuse(self) -> None:
@@ -192,7 +192,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         }
 
         channel = self.make_request("POST", self.url, body)
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["user_id"])
 
         # Now, try and reuse it
@@ -323,11 +323,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
 
         channel = self.make_request("POST", self.url, body)
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob1:test", channel.json_body["user_id"])
 
         channel = self.make_request("GET", "/profile/@bob1:test/displayname")
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("bob1", channel.json_body["displayname"])
 
         # displayname is None
@@ -347,11 +347,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         }
 
         channel = self.make_request("POST", self.url, body)
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob2:test", channel.json_body["user_id"])
 
         channel = self.make_request("GET", "/profile/@bob2:test/displayname")
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("bob2", channel.json_body["displayname"])
 
         # displayname is empty
@@ -371,7 +371,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         }
 
         channel = self.make_request("POST", self.url, body)
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob3:test", channel.json_body["user_id"])
 
         channel = self.make_request("GET", "/profile/@bob3:test/displayname")
@@ -394,11 +394,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         }
 
         channel = self.make_request("POST", self.url, body)
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob4:test", channel.json_body["user_id"])
 
         channel = self.make_request("GET", "/profile/@bob4:test/displayname")
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("Bob's Name", channel.json_body["displayname"])
 
     @override_config(
@@ -442,7 +442,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         }
 
         channel = self.make_request("POST", self.url, body)
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         self.assertEqual("@bob:test", channel.json_body["user_id"])
 
@@ -494,7 +494,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(3, len(channel.json_body["users"]))
         self.assertEqual(3, channel.json_body["total"])
 
@@ -508,7 +508,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             expected_user_id: Optional[str],
             search_term: str,
             search_field: Optional[str] = "name",
-            expected_http_code: Optional[int] = HTTPStatus.OK,
+            expected_http_code: Optional[int] = 200,
         ) -> None:
             """Search for a user and check that the returned user's id is a
             match
@@ -530,7 +530,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             )
             self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
 
-            if expected_http_code != HTTPStatus.OK:
+            if expected_http_code != 200:
                 return
 
             # Check that users were returned
@@ -659,7 +659,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), 5)
         self.assertEqual(channel.json_body["next_token"], "5")
@@ -680,7 +680,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), 15)
         self.assertNotIn("next_token", channel.json_body)
@@ -701,7 +701,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(channel.json_body["next_token"], "15")
         self.assertEqual(len(channel.json_body["users"]), 10)
@@ -724,7 +724,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), number_users)
         self.assertNotIn("next_token", channel.json_body)
@@ -737,7 +737,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), number_users)
         self.assertNotIn("next_token", channel.json_body)
@@ -750,7 +750,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), 19)
         self.assertEqual(channel.json_body["next_token"], "19")
@@ -764,7 +764,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_users)
         self.assertEqual(len(channel.json_body["users"]), 1)
         self.assertNotIn("next_token", channel.json_body)
@@ -867,7 +867,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             url,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], len(expected_user_list))
 
         returned_order = [row["name"] for row in channel.json_body["users"]]
@@ -1017,7 +1017,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(False, channel.json_body["deactivated"])
         self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1032,7 +1032,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             content={"erase": True},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Get user
         channel = self.make_request(
@@ -1041,7 +1041,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(True, channel.json_body["deactivated"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1066,7 +1066,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"erase": True},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self._is_erased("@user:test", True)
 
     def test_deactivate_user_erase_false(self) -> None:
@@ -1081,7 +1081,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(False, channel.json_body["deactivated"])
         self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1096,7 +1096,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             content={"erase": False},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Get user
         channel = self.make_request(
@@ -1105,7 +1105,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(True, channel.json_body["deactivated"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1135,7 +1135,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(False, channel.json_body["deactivated"])
         self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1150,7 +1150,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             content={"erase": True},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
 
         # Get user
         channel = self.make_request(
@@ -1159,7 +1159,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(True, channel.json_body["deactivated"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1352,7 +1352,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual("User", channel.json_body["displayname"])
         self._check_fields(channel.json_body)
@@ -1395,7 +1395,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["name"])
         self.assertEqual("Bob's name", channel.json_body["displayname"])
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1458,7 +1458,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["name"])
         self.assertEqual("Bob's name", channel.json_body["displayname"])
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1486,7 +1486,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # before limit of monthly active users is reached
         channel = self.make_request("GET", "/sync", access_token=self.admin_user_tok)
-        if channel.code != HTTPStatus.OK:
+        if channel.code != 200:
             raise HttpResponseException(
                 channel.code, channel.result["reason"], channel.result["body"]
             )
@@ -1684,7 +1684,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"password": "hahaha"},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self._check_fields(channel.json_body)
 
     def test_set_displayname(self) -> None:
@@ -1700,7 +1700,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"displayname": "foobar"},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual("foobar", channel.json_body["displayname"])
 
@@ -1711,7 +1711,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual("foobar", channel.json_body["displayname"])
 
@@ -1733,7 +1733,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["threepids"]))
         # result does not always have the same sort order, therefore it becomes sorted
@@ -1759,7 +1759,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["threepids"]))
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1775,7 +1775,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["threepids"]))
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1791,7 +1791,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"threepids": []},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
         self._check_fields(channel.json_body)
@@ -1818,7 +1818,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(first_user, channel.json_body["name"])
         self.assertEqual(1, len(channel.json_body["threepids"]))
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1837,7 +1837,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(1, len(channel.json_body["threepids"]))
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1859,7 +1859,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         )
 
         # other user has this two threepids
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["threepids"]))
         # result does not always have the same sort order, therefore it becomes sorted
@@ -1878,7 +1878,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             url_first_user,
             access_token=self.admin_user_tok,
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(first_user, channel.json_body["name"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
         self._check_fields(channel.json_body)
@@ -1907,7 +1907,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["external_ids"]))
         # result does not always have the same sort order, therefore it becomes sorted
@@ -1939,7 +1939,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["external_ids"]))
         self.assertEqual(
@@ -1958,7 +1958,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(2, len(channel.json_body["external_ids"]))
         self.assertEqual(
@@ -1977,7 +1977,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"external_ids": []},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(0, len(channel.json_body["external_ids"]))
 
@@ -2006,7 +2006,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(first_user, channel.json_body["name"])
         self.assertEqual(1, len(channel.json_body["external_ids"]))
         self.assertEqual(
@@ -2032,7 +2032,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             },
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(1, len(channel.json_body["external_ids"]))
         self.assertEqual(
@@ -2075,7 +2075,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(1, len(channel.json_body["external_ids"]))
         self.assertEqual(
@@ -2093,7 +2093,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(first_user, channel.json_body["name"])
         self.assertEqual(1, len(channel.json_body["external_ids"]))
         self.assertEqual(
@@ -2124,7 +2124,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertFalse(channel.json_body["deactivated"])
         self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -2139,7 +2139,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"deactivated": True},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["deactivated"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -2158,7 +2158,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["deactivated"])
         self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -2188,7 +2188,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"deactivated": True},
        )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["deactivated"])
 
@@ -2204,7 +2204,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"displayname": "Foobar"},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["deactivated"])
         self.assertEqual("Foobar", channel.json_body["displayname"])
@@ -2237,7 +2237,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"deactivated": False, "password": "foo"},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertFalse(channel.json_body["deactivated"])
         self._is_erased("@user:test", False)
@@ -2271,7 +2271,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"deactivated": False},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertFalse(channel.json_body["deactivated"])
         self._is_erased("@user:test", False)
@@ -2305,7 +2305,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"deactivated": False},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertFalse(channel.json_body["deactivated"])
         self._is_erased("@user:test", False)
@@ -2326,7 +2326,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"admin": True},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["admin"])
 
@@ -2337,7 +2337,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["admin"])
 
@@ -2354,7 +2354,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"user_type": UserTypes.SUPPORT},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(UserTypes.SUPPORT, channel.json_body["user_type"])
 
@@ -2365,7 +2365,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(UserTypes.SUPPORT, channel.json_body["user_type"])
 
@@ -2377,7 +2377,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             content={"user_type": None},
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertIsNone(channel.json_body["user_type"])
 
@@ -2388,7 +2388,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertIsNone(channel.json_body["user_type"])
 
@@ -2418,7 +2418,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["name"])
         self.assertEqual("bob", channel.json_body["displayname"])
         self.assertEqual(0, channel.json_body["deactivated"])
@@ -2440,7 +2440,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["name"])
         self.assertEqual("bob", channel.json_body["displayname"])
 
@@ -2465,7 +2465,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
             content={"deactivated": True},
         )
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertTrue(channel.json_body["deactivated"])
         self._is_erased(user_id, False)
         d = self.store.mark_user_erased(user_id)
@@ -2549,7 +2549,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["joined_rooms"]))
 
@@ -2565,7 +2565,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["joined_rooms"]))
 
@@ -2581,7 +2581,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["joined_rooms"]))
 
@@ -2602,7 +2602,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(number_rooms, channel.json_body["total"])
         self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"]))
 
@@ -2649,7 +2649,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
        )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, channel.json_body["total"])
         self.assertEqual([local_and_remote_room_id], channel.json_body["joined_rooms"])
 
@@ -2737,7 +2737,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(0, channel.json_body["total"])
 
         # Register the pusher
@@ -2769,7 +2769,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(1, channel.json_body["total"])
 
         for p in channel.json_body["pushers"]:
@@ -2865,7 +2865,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_media)
         self.assertEqual(len(channel.json_body["media"]), 5)
         self.assertEqual(channel.json_body["next_token"], 5)
@@ -2884,7 +2884,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 5)
         self.assertEqual(len(channel.json_body["deleted_media"]), 5)
 
@@ -2901,7 +2901,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
        )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_media)
         self.assertEqual(len(channel.json_body["media"]), 15)
         self.assertNotIn("next_token", channel.json_body)
@@ -2920,7 +2920,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 15)
         self.assertEqual(len(channel.json_body["deleted_media"]), 15)
 
@@ -2937,7 +2937,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_media)
         self.assertEqual(channel.json_body["next_token"], 15)
         self.assertEqual(len(channel.json_body["media"]), 10)
@@ -2956,7 +2956,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], 10)
         self.assertEqual(len(channel.json_body["deleted_media"]), 10)
 
@@ -3023,7 +3023,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_media)
         self.assertEqual(len(channel.json_body["media"]), number_media)
         self.assertNotIn("next_token", channel.json_body)
@@ -3036,7 +3036,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
 
-        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual(channel.json_body["total"], number_media)
         self.assertEqual(len(channel.json_body["media"]), number_media)
         self.assertNotIn("next_token", channel.json_body)
@@ -3049,7 +3049,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_media) self.assertEqual(len(channel.json_body["media"]), 19) self.assertEqual(channel.json_body["next_token"], 19) @@ -3063,7 +3063,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_media) self.assertEqual(len(channel.json_body["media"]), 1) self.assertNotIn("next_token", channel.json_body) @@ -3080,7 +3080,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["media"])) @@ -3095,7 +3095,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["deleted_media"])) @@ -3112,7 +3112,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_media, channel.json_body["total"]) self.assertEqual(number_media, len(channel.json_body["media"])) self.assertNotIn("next_token", channel.json_body) @@ -3138,7 +3138,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(number_media, channel.json_body["total"]) self.assertEqual(number_media, len(channel.json_body["deleted_media"])) self.assertCountEqual(channel.json_body["deleted_media"], media_ids) @@ -3283,7 +3283,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): # Upload some media into the room response = self.helper.upload_media( - upload_resource, image_data, user_token, filename, expect_code=HTTPStatus.OK + upload_resource, image_data, user_token, filename, expect_code=200 ) # Extract media ID from the response @@ -3301,10 +3301,10 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.OK, + 200, channel.code, msg=( - f"Expected to receive a HTTPStatus.OK on accessing media: {server_and_media_id}" + f"Expected to receive a 200 on accessing media: {server_and_media_id}" ), ) @@ -3350,7 +3350,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], len(expected_media_list)) returned_order = [row["media_id"] for row in channel.json_body["media"]] @@ -3386,7 +3386,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.url, b"{}", access_token=self.admin_user_tok ) - 
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) return channel.json_body["access_token"] def test_no_auth(self) -> None: @@ -3427,7 +3427,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # We should only see the one device (from the login in `prepare`) self.assertEqual(len(channel.json_body["devices"]), 1) @@ -3439,11 +3439,11 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): # Test that we can successfully make a request channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Logout with the puppet token channel = self.make_request("POST", "logout", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # The puppet token should no longer work channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) @@ -3453,7 +3453,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) def test_user_logout_all(self) -> None: """Tests that the target user calling `/logout/all` does *not* expire @@ -3464,17 +3464,17 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): # Test that we can successfully make a request channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Logout all with the real user token channel = self.make_request( "POST", "logout/all", b"{}", access_token=self.other_user_tok ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # The puppet token should still work channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # .. 
but the real user's tokens shouldn't channel = self.make_request( @@ -3491,13 +3491,13 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): # Test that we can successfully make a request channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # Logout all with the admin user token channel = self.make_request( "POST", "logout/all", b"{}", access_token=self.admin_user_tok ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) # The puppet token should no longer work channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) @@ -3507,7 +3507,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) @unittest.override_config( { @@ -3635,7 +3635,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(self.other_user, channel.json_body["user_id"]) self.assertIn("devices", channel.json_body) @@ -3650,7 +3650,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): self.url, access_token=other_user_token, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(self.other_user, channel.json_body["user_id"]) self.assertIn("devices", channel.json_body) @@ -3715,7 +3715,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): self.assertFalse(result.shadow_banned) channel = self.make_request("POST", self.url, access_token=self.admin_user_tok) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual({}, channel.json_body) # Ensure the user is shadow-banned (and the cache was cleared). @@ -3727,7 +3727,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): channel = self.make_request( "DELETE", self.url, access_token=self.admin_user_tok ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual({}, channel.json_body) # Ensure the user is no longer shadow-banned (and the cache was cleared). 
@@ -3891,7 +3891,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["messages_per_second"]) self.assertEqual(0, channel.json_body["burst_count"]) @@ -3905,7 +3905,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertNotIn("messages_per_second", channel.json_body) self.assertNotIn("burst_count", channel.json_body) @@ -3916,7 +3916,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"messages_per_second": 10, "burst_count": 11}, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(10, channel.json_body["messages_per_second"]) self.assertEqual(11, channel.json_body["burst_count"]) @@ -3927,7 +3927,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"messages_per_second": 20, "burst_count": 21}, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(20, channel.json_body["messages_per_second"]) self.assertEqual(21, channel.json_body["burst_count"]) @@ -3937,7 +3937,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(20, channel.json_body["messages_per_second"]) self.assertEqual(21, channel.json_body["burst_count"]) @@ -3947,7 +3947,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertNotIn("messages_per_second", channel.json_body) self.assertNotIn("burst_count", channel.json_body) @@ -3957,7 +3957,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertNotIn("messages_per_second", channel.json_body) self.assertNotIn("burst_count", channel.json_body) @@ -4042,7 +4042,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): self.url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual( {"a": 1}, channel.json_body["account_data"]["global"]["m.global"] ) diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index b21f6d4689..b5e7eecf87 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -50,18 +50,18 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): def test_username_available(self) -> None: """ - The endpoint should return a HTTPStatus.OK response if the username does not exist + The endpoint should return a 200 response if the username does not exist """ url = 
"%s?username=%s" % (self.url, "allowed") channel = self.make_request("GET", url, access_token=self.admin_user_tok) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body["available"]) def test_username_unavailable(self) -> None: """ - The endpoint should return a HTTPStatus.OK response if the username does not exist + The endpoint should return a 200 response if the username does not exist """ url = "%s?username=%s" % (self.url, "disallowed") -- cgit 1.4.1 From 0f954466c43946b8fdfac9a1b69873275a5e1207 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 8 Aug 2022 13:44:43 -0700 Subject: Update matrix-synapse-ldap3 version in lockfile to v0.2.2. (#13470) --- changelog.d/13470.misc | 2 ++ poetry.lock | 24 ++++++++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) create mode 100644 changelog.d/13470.misc diff --git a/changelog.d/13470.misc b/changelog.d/13470.misc new file mode 100644 index 0000000000..33cfa58167 --- /dev/null +++ b/changelog.d/13470.misc @@ -0,0 +1,2 @@ +Update the version of matrix-ldap-3 included in the matrixdotorg/synapse DockerHub images and the Debian packages +hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the plugin. diff --git a/poetry.lock b/poetry.lock index b62c24ae16..1acdb5da56 100644 --- a/poetry.lock +++ b/poetry.lock @@ -177,7 +177,7 @@ optional = false python-versions = "*" [package.extras] -test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] +test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"] [[package]] name = "constantly" @@ -435,8 +435,8 @@ optional = false python-versions = ">=3.6" [package.extras] -test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"] -trio = ["trio", "async-generator"] +trio = ["async-generator", "trio"] +test = ["async-timeout", "trio", "testpath", "pytest-asyncio", "pytest-trio", "pytest"] [[package]] name = "jinja2" @@ -535,12 +535,12 @@ attrs = "*" importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] -dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"] -test = ["tox", "twisted", "aiounittest"] +test = ["aiounittest", "twisted", "tox"] +dev = ["twine (==4.0.1)", "build (==0.8.0)", "isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "mypy (==0.910)", "aiounittest", "twisted", "tox"] [[package]] name = "matrix-synapse-ldap3" -version = "0.2.1" +version = "0.2.2" description = "An LDAP3 auth provider for Synapse" category = "main" optional = true @@ -552,7 +552,7 @@ service-identity = "*" Twisted = ">=15.1.0" [package.extras] -dev = ["matrix-synapse", "tox", "ldaptor", "mypy (==0.910)", "types-setuptools", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)"] +dev = ["isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "types-setuptools", "mypy (==0.910)", "ldaptor", "tox", "matrix-synapse"] [[package]] name = "mccabe" @@ -820,10 +820,10 @@ optional = false python-versions = ">=3.6" [package.extras] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"] +dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"] crypto = ["cryptography (>=3.3.1)"] -dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography 
(>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] -docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] -tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] [[package]] name = "pymacaroons" @@ -2055,8 +2055,8 @@ matrix-common = [ {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"}, ] matrix-synapse-ldap3 = [ - {file = "matrix-synapse-ldap3-0.2.1.tar.gz", hash = "sha256:bfb4390f4a262ffb0d6f057ff3aeb1e46d4e52ff420a064d795fb4f555f00285"}, - {file = "matrix_synapse_ldap3-0.2.1-py3-none-any.whl", hash = "sha256:1b3310a60f1d06466f35905a269b6df95747fd1305f2b7fe638f373963b2aa2c"}, + {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"}, + {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"}, ] mccabe = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, -- cgit 1.4.1 From 70d3e7000910f0171f43fb4d2450121eea1214c7 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 9 Aug 2022 12:30:22 +0200 Subject: Make the configuration for the cache clearer (#13481) --- changelog.d/13481.doc | 1 + docs/usage/configuration/config_documentation.md | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13481.doc diff --git a/changelog.d/13481.doc b/changelog.d/13481.doc new file mode 100644 index 0000000000..b07a6a8a9e --- /dev/null +++ b/changelog.d/13481.doc @@ -0,0 +1 @@ +Make the configuration for the cache clearer. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 3a9466a837..2af32a6155 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1056,26 +1056,26 @@ allow_device_name_lookup_over_federation: true --- ## Caching ## -Options related to caching +Options related to caching. --- ### `event_cache_size` The number of events to cache in memory. Not affected by -`caches.global_factor`. Defaults to 10K. +`caches.global_factor` and is not part of the `caches` section. Defaults to 10K. Example configuration: ```yaml event_cache_size: 15K ``` --- -### `cache` and associated values +### `caches` and associated values A cache 'factor' is a multiplier that can be applied to each of Synapse's caches in order to increase or decrease the maximum number of entries that can be stored. 
-Caching can be configured through the following sub-options: +`caches` can be configured through the following sub-options: * `global_factor`: Controls the global cache factor, which is the default cache factor for all caches if a specific factor for that cache is not otherwise @@ -1137,6 +1137,7 @@ Caching can be configured through the following sub-options: Example configuration: ```yaml +event_cache_size: 15K caches: global_factor: 1.0 per_cache_factors: -- cgit 1.4.1 From 827f0669bfb330b3aa5b09fe42226658a693ef8e Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 9 Aug 2022 11:43:30 +0100 Subject: 1.65.0rc1 --- CHANGES.md | 75 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/11897.doc | 1 - changelog.d/12978.misc | 1 - changelog.d/13160.misc | 1 - changelog.d/13213.misc | 1 - changelog.d/13221.doc | 1 - changelog.d/13230.doc | 1 - changelog.d/13273.feature | 1 - changelog.d/13343.feature | 1 - changelog.d/13346.misc | 1 - changelog.d/13353.bugfix | 1 - changelog.d/13355.misc | 1 - changelog.d/13365.bugfix | 1 - changelog.d/13368.misc | 1 - changelog.d/13370.feature | 1 - changelog.d/13372.docker | 1 - changelog.d/13374.bugfix | 1 - changelog.d/13383.misc | 1 - changelog.d/13392.bugfix | 1 - changelog.d/13393.misc | 1 - changelog.d/13397.misc | 1 - changelog.d/13403.misc | 1 - changelog.d/13404.misc | 1 - changelog.d/13408.bugfix | 1 - changelog.d/13413.bugfix | 1 - changelog.d/13416.misc | 1 - changelog.d/13420.misc | 1 - changelog.d/13428.feature | 1 - changelog.d/13429.feature | 1 - changelog.d/13431.misc | 1 - changelog.d/13432.bugfix | 1 - changelog.d/13435.misc | 1 - changelog.d/13437.doc | 1 - changelog.d/13438.doc | 1 - changelog.d/13439.misc | 1 - changelog.d/13441.feature | 1 - changelog.d/13442.misc | 1 - changelog.d/13443.doc | 1 - changelog.d/13445.misc | 1 - changelog.d/13447.misc | 1 - changelog.d/13449.doc | 1 - changelog.d/13450.doc | 1 - changelog.d/13452.misc | 1 - changelog.d/13455.misc | 1 - changelog.d/13460.misc | 1 - changelog.d/13463.misc | 1 - changelog.d/13467.misc | 1 - changelog.d/13469.misc | 1 - changelog.d/13470.misc | 2 -- changelog.d/13481.doc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 52 files changed, 82 insertions(+), 51 deletions(-) delete mode 100644 changelog.d/11897.doc delete mode 100644 changelog.d/12978.misc delete mode 100644 changelog.d/13160.misc delete mode 100644 changelog.d/13213.misc delete mode 100644 changelog.d/13221.doc delete mode 100644 changelog.d/13230.doc delete mode 100644 changelog.d/13273.feature delete mode 100644 changelog.d/13343.feature delete mode 100644 changelog.d/13346.misc delete mode 100644 changelog.d/13353.bugfix delete mode 100644 changelog.d/13355.misc delete mode 100644 changelog.d/13365.bugfix delete mode 100644 changelog.d/13368.misc delete mode 100644 changelog.d/13370.feature delete mode 100644 changelog.d/13372.docker delete mode 100644 changelog.d/13374.bugfix delete mode 100644 changelog.d/13383.misc delete mode 100644 changelog.d/13392.bugfix delete mode 100644 changelog.d/13393.misc delete mode 100644 changelog.d/13397.misc delete mode 100644 changelog.d/13403.misc delete mode 100644 changelog.d/13404.misc delete mode 100644 changelog.d/13408.bugfix delete mode 100644 changelog.d/13413.bugfix delete mode 100644 changelog.d/13416.misc delete mode 100644 changelog.d/13420.misc delete mode 100644 changelog.d/13428.feature delete mode 100644 changelog.d/13429.feature delete mode 100644 changelog.d/13431.misc delete mode 100644 changelog.d/13432.bugfix delete 
mode 100644 changelog.d/13435.misc delete mode 100644 changelog.d/13437.doc delete mode 100644 changelog.d/13438.doc delete mode 100644 changelog.d/13439.misc delete mode 100644 changelog.d/13441.feature delete mode 100644 changelog.d/13442.misc delete mode 100644 changelog.d/13443.doc delete mode 100644 changelog.d/13445.misc delete mode 100644 changelog.d/13447.misc delete mode 100644 changelog.d/13449.doc delete mode 100644 changelog.d/13450.doc delete mode 100644 changelog.d/13452.misc delete mode 100644 changelog.d/13455.misc delete mode 100644 changelog.d/13460.misc delete mode 100644 changelog.d/13463.misc delete mode 100644 changelog.d/13467.misc delete mode 100644 changelog.d/13469.misc delete mode 100644 changelog.d/13470.misc delete mode 100644 changelog.d/13481.doc diff --git a/CHANGES.md b/CHANGES.md index 0e69f25e0e..1a28e3e375 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,78 @@ +Synapse 1.65.0rc1 (2022-08-09) +============================== + +Features +-------- + +- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273)) +- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in MSC3848. ([\#13343](https://github.com/matrix-org/synapse/issues/13343)) +- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370)) +- Add a module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428)) +- Add a module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429)) +- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441)) + + +Bugfixes +-------- + +- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353)) +- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13365](https://github.com/matrix-org/synapse/issues/13365)) +- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374)) +- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392)) +- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408)) +- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413)) +- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. 
([\#13432](https://github.com/matrix-org/synapse/issues/13432)) + + +Updates to the Docker image +--------------------------- + +- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372)) + + +Improved Documentation +---------------------- + +- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897)) +- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221)) +- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230)) +- Fix wrong headline for `url_preview_accept_language` in documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437)) +- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438)) +- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443)) +- Update outdated information on `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449)) +- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450)) +- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481)) + + +Internal Changes +---------------- + +- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978)) +- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160)) +- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213)) +- Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346)) +- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355)) +- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368)) +- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383)) +- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393)) +- Adding missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397)) +- Faster Room Joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403)) +- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. 
([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431)) +- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416)) +- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420)) +- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435)) +- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439)) +- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442)) +- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455)) +- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447)) +- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452)) +- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460)) +- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469)) +- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467)) +- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages + hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470)) + + Synapse 1.64.0 (2022-08-02) =========================== diff --git a/changelog.d/11897.doc b/changelog.d/11897.doc deleted file mode 100644 index d86b20f53d..0000000000 --- a/changelog.d/11897.doc +++ /dev/null @@ -1 +0,0 @@ -Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. diff --git a/changelog.d/12978.misc b/changelog.d/12978.misc deleted file mode 100644 index 050c9047fc..0000000000 --- a/changelog.d/12978.misc +++ /dev/null @@ -1 +0,0 @@ -Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. \ No newline at end of file diff --git a/changelog.d/13160.misc b/changelog.d/13160.misc deleted file mode 100644 index 36ff50c2a6..0000000000 --- a/changelog.d/13160.misc +++ /dev/null @@ -1 +0,0 @@ -Make minor clarifications to the error messages given when we fail to join a room via any server. \ No newline at end of file diff --git a/changelog.d/13213.misc b/changelog.d/13213.misc deleted file mode 100644 index b50d26ac0c..0000000000 --- a/changelog.d/13213.misc +++ /dev/null @@ -1 +0,0 @@ -Enable Complement CI tests in the 'latest deps' test run. 
\ No newline at end of file diff --git a/changelog.d/13221.doc b/changelog.d/13221.doc deleted file mode 100644 index dd2b3d8972..0000000000 --- a/changelog.d/13221.doc +++ /dev/null @@ -1 +0,0 @@ -Document which HTTP resources support gzip compression. diff --git a/changelog.d/13230.doc b/changelog.d/13230.doc deleted file mode 100644 index dce7be2425..0000000000 --- a/changelog.d/13230.doc +++ /dev/null @@ -1 +0,0 @@ -Add steps describing how to elevate an existing user to administrator by manipulating the database. diff --git a/changelog.d/13273.feature b/changelog.d/13273.feature deleted file mode 100644 index 53110d74e9..0000000000 --- a/changelog.d/13273.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). diff --git a/changelog.d/13343.feature b/changelog.d/13343.feature deleted file mode 100644 index c151251e54..0000000000 --- a/changelog.d/13343.feature +++ /dev/null @@ -1 +0,0 @@ -Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in MSC3848. \ No newline at end of file diff --git a/changelog.d/13346.misc b/changelog.d/13346.misc deleted file mode 100644 index 06557c8481..0000000000 --- a/changelog.d/13346.misc +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event. diff --git a/changelog.d/13353.bugfix b/changelog.d/13353.bugfix deleted file mode 100644 index 8e18bfae1f..0000000000 --- a/changelog.d/13353.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. diff --git a/changelog.d/13355.misc b/changelog.d/13355.misc deleted file mode 100644 index 7715075885..0000000000 --- a/changelog.d/13355.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: avoid blocking when pulling events with partially missing prev events. diff --git a/changelog.d/13365.bugfix b/changelog.d/13365.bugfix deleted file mode 100644 index b915c3158c..0000000000 --- a/changelog.d/13365.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). diff --git a/changelog.d/13368.misc b/changelog.d/13368.misc deleted file mode 100644 index 4b433a5107..0000000000 --- a/changelog.d/13368.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `/messages` for understandable traces in Jaeger. diff --git a/changelog.d/13370.feature b/changelog.d/13370.feature deleted file mode 100644 index 3a49bc2778..0000000000 --- a/changelog.d/13370.feature +++ /dev/null @@ -1 +0,0 @@ -Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). diff --git a/changelog.d/13372.docker b/changelog.d/13372.docker deleted file mode 100644 index 238c78de09..0000000000 --- a/changelog.d/13372.docker +++ /dev/null @@ -1 +0,0 @@ -Make docker images build on armv7 by installing cryptography dependencies in the "requirements" stage. Contributed by Jasper Spaans. 
\ No newline at end of file diff --git a/changelog.d/13374.bugfix b/changelog.d/13374.bugfix deleted file mode 100644 index 1c5bd1b363..0000000000 --- a/changelog.d/13374.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. \ No newline at end of file diff --git a/changelog.d/13383.misc b/changelog.d/13383.misc deleted file mode 100644 index 2236eced24..0000000000 --- a/changelog.d/13383.misc +++ /dev/null @@ -1 +0,0 @@ -Remove an unused argument to `get_relations_for_event`. diff --git a/changelog.d/13392.bugfix b/changelog.d/13392.bugfix deleted file mode 100644 index 7d83c77550..0000000000 --- a/changelog.d/13392.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13393.misc b/changelog.d/13393.misc deleted file mode 100644 index be2b0153ea..0000000000 --- a/changelog.d/13393.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `merge-back` command to the release script, which automates merging the correct branches after a release. \ No newline at end of file diff --git a/changelog.d/13397.misc b/changelog.d/13397.misc deleted file mode 100644 index 8dc610d9e2..0000000000 --- a/changelog.d/13397.misc +++ /dev/null @@ -1 +0,0 @@ -Adding missing type hints to tests. diff --git a/changelog.d/13403.misc b/changelog.d/13403.misc deleted file mode 100644 index cb7b38153c..0000000000 --- a/changelog.d/13403.misc +++ /dev/null @@ -1 +0,0 @@ -Faster Room Joins: don't leave a stuck room partial state flag if the join fails. diff --git a/changelog.d/13404.misc b/changelog.d/13404.misc deleted file mode 100644 index 655be4061b..0000000000 --- a/changelog.d/13404.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. diff --git a/changelog.d/13408.bugfix b/changelog.d/13408.bugfix deleted file mode 100644 index 8b87b2cf7b..0000000000 --- a/changelog.d/13408.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. diff --git a/changelog.d/13413.bugfix b/changelog.d/13413.bugfix deleted file mode 100644 index a0ce884274..0000000000 --- a/changelog.d/13413.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. \ No newline at end of file diff --git a/changelog.d/13416.misc b/changelog.d/13416.misc deleted file mode 100644 index 2904e73376..0000000000 --- a/changelog.d/13416.misc +++ /dev/null @@ -1 +0,0 @@ -Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. \ No newline at end of file diff --git a/changelog.d/13420.misc b/changelog.d/13420.misc deleted file mode 100644 index ff1a68e2e8..0000000000 --- a/changelog.d/13420.misc +++ /dev/null @@ -1 +0,0 @@ -Re-enable running Complement tests against Synapse with workers. \ No newline at end of file diff --git a/changelog.d/13428.feature b/changelog.d/13428.feature deleted file mode 100644 index 085b61483f..0000000000 --- a/changelog.d/13428.feature +++ /dev/null @@ -1 +0,0 @@ -Add a module API method to translate a room alias into a room ID. 
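The sketch below shows how a module might call such an alias-translation method; the method name `lookup_room_alias` and its return shape are assumptions made for the sketch, not something this changelog entry confirms.

```python
# Hypothetical sketch of a Synapse module resolving an alias to a room ID.
# `api.lookup_room_alias` (name and signature assumed here) would map e.g.
# "#general:example.org" to an ("!opaque_id:example.org", servers) pair.
from typing import Any, Optional


class AliasResolvingModule:
    def __init__(self, config: dict, api: Any) -> None:
        self._api = api

    async def room_id_for_alias(self, alias: str) -> Optional[str]:
        try:
            room_id, _servers = await self._api.lookup_room_alias(alias)
        except Exception:
            # Alias unknown, or the lookup failed.
            return None
        return room_id
```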
diff --git a/changelog.d/13429.feature b/changelog.d/13429.feature deleted file mode 100644 index f4f347e54e..0000000000 --- a/changelog.d/13429.feature +++ /dev/null @@ -1 +0,0 @@ -Add a module API method to create a room. diff --git a/changelog.d/13431.misc b/changelog.d/13431.misc deleted file mode 100644 index 655be4061b..0000000000 --- a/changelog.d/13431.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. diff --git a/changelog.d/13432.bugfix b/changelog.d/13432.bugfix deleted file mode 100644 index bb99616afc..0000000000 --- a/changelog.d/13432.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: Fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. diff --git a/changelog.d/13435.misc b/changelog.d/13435.misc deleted file mode 100644 index c01b9136c8..0000000000 --- a/changelog.d/13435.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13437.doc b/changelog.d/13437.doc deleted file mode 100644 index fb772b24dc..0000000000 --- a/changelog.d/13437.doc +++ /dev/null @@ -1 +0,0 @@ -Fix wrong headline for `url_preview_accept_language` in documentation. diff --git a/changelog.d/13438.doc b/changelog.d/13438.doc deleted file mode 100644 index 163b63ffc6..0000000000 --- a/changelog.d/13438.doc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. diff --git a/changelog.d/13439.misc b/changelog.d/13439.misc deleted file mode 100644 index 4aa73d7075..0000000000 --- a/changelog.d/13439.misc +++ /dev/null @@ -1 +0,0 @@ -Add some tracing to give more insight into local room joins. diff --git a/changelog.d/13441.feature b/changelog.d/13441.feature deleted file mode 100644 index 3a4ae8bf01..0000000000 --- a/changelog.d/13441.feature +++ /dev/null @@ -1 +0,0 @@ -Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). diff --git a/changelog.d/13442.misc b/changelog.d/13442.misc deleted file mode 100644 index f503bc79d3..0000000000 --- a/changelog.d/13442.misc +++ /dev/null @@ -1 +0,0 @@ -Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. \ No newline at end of file diff --git a/changelog.d/13443.doc b/changelog.d/13443.doc deleted file mode 100644 index 0db5d1b3b4..0000000000 --- a/changelog.d/13443.doc +++ /dev/null @@ -1 +0,0 @@ -Update documentation for config setting `macaroon_secret_key`. \ No newline at end of file diff --git a/changelog.d/13445.misc b/changelog.d/13445.misc deleted file mode 100644 index 17462c56f3..0000000000 --- a/changelog.d/13445.misc +++ /dev/null @@ -1 +0,0 @@ -Add some comments about how event push actions are stored. diff --git a/changelog.d/13447.misc b/changelog.d/13447.misc deleted file mode 100644 index ede3ee91b8..0000000000 --- a/changelog.d/13447.misc +++ /dev/null @@ -1 +0,0 @@ -Improve rebuild speed for the "synapse-workers" docker image. diff --git a/changelog.d/13449.doc b/changelog.d/13449.doc deleted file mode 100644 index cbe4f62b6e..0000000000 --- a/changelog.d/13449.doc +++ /dev/null @@ -1 +0,0 @@ -Update outdated information on `sso_mapping_providers` documentation. 
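For orientation, an SSO mapping provider is a user-supplied class that turns attributes from the identity provider into a Matrix localpart and display name. The shape below is a hedged sketch from memory; the exact method names and signatures should be checked against the `sso_mapping_providers` documentation itself.

```python
# Hedged sketch of an OIDC-style mapping provider; the interface is assumed.
from typing import Any, Dict


class ExampleMappingProvider:
    def __init__(self, parsed_config: Any, module_api: Any = None) -> None:
        self._config = parsed_config

    @staticmethod
    def parse_config(config: Dict[str, Any]) -> Any:
        # Validate/normalise the provider's own config section here.
        return config

    def get_remote_user_id(self, userinfo: Dict[str, Any]) -> str:
        # A stable, unique identifier from the identity provider.
        return userinfo["sub"]

    async def map_user_attributes(
        self, userinfo: Dict[str, Any], token: Dict[str, Any], failures: int = 0
    ) -> Dict[str, Any]:
        localpart = userinfo["preferred_username"].lower()
        if failures:
            # De-duplicate if the localpart is already taken.
            localpart += str(failures)
        return {"localpart": localpart, "display_name": userinfo.get("name")}
```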
diff --git a/changelog.d/13450.doc b/changelog.d/13450.doc deleted file mode 100644 index e272baff0a..0000000000 --- a/changelog.d/13450.doc +++ /dev/null @@ -1 +0,0 @@ -Fix example code in module documentation of `password_auth_provider_callbacks`. diff --git a/changelog.d/13452.misc b/changelog.d/13452.misc deleted file mode 100644 index 13d1523de2..0000000000 --- a/changelog.d/13452.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). diff --git a/changelog.d/13455.misc b/changelog.d/13455.misc deleted file mode 100644 index 17462c56f3..0000000000 --- a/changelog.d/13455.misc +++ /dev/null @@ -1 +0,0 @@ -Add some comments about how event push actions are stored. diff --git a/changelog.d/13460.misc b/changelog.d/13460.misc deleted file mode 100644 index f9e9de219d..0000000000 --- a/changelog.d/13460.misc +++ /dev/null @@ -1 +0,0 @@ -Update type of `EventContext.rejected`. diff --git a/changelog.d/13463.misc b/changelog.d/13463.misc deleted file mode 100644 index a4c8691144..0000000000 --- a/changelog.d/13463.misc +++ /dev/null @@ -1 +0,0 @@ -Use literals in place of `HTTPStatus` constants in tests. diff --git a/changelog.d/13467.misc b/changelog.d/13467.misc deleted file mode 100644 index b8b7d65c16..0000000000 --- a/changelog.d/13467.misc +++ /dev/null @@ -1 +0,0 @@ -Correct a misnamed argument in state res v2 internals. diff --git a/changelog.d/13469.misc b/changelog.d/13469.misc deleted file mode 100644 index 315930deab..0000000000 --- a/changelog.d/13469.misc +++ /dev/null @@ -1 +0,0 @@ -Use literals in place of `HTTPStatus` constants in tests. \ No newline at end of file diff --git a/changelog.d/13470.misc b/changelog.d/13470.misc deleted file mode 100644 index 33cfa58167..0000000000 --- a/changelog.d/13470.misc +++ /dev/null @@ -1,2 +0,0 @@ -Update the version of matrix-ldap-3 included in the matrixdotorg/synapse DockerHub images and the Debian packages -hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the plugin. diff --git a/changelog.d/13481.doc b/changelog.d/13481.doc deleted file mode 100644 index b07a6a8a9e..0000000000 --- a/changelog.d/13481.doc +++ /dev/null @@ -1 +0,0 @@ -Make the configuration for the cache clearer. diff --git a/debian/changelog b/debian/changelog index 9efcb4f132..6e4bdc3a00 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium + + * New Synapse release 1.65.0rc1. + + -- Synapse Packaging team Tue, 09 Aug 2022 11:39:29 +0100 + matrix-synapse-py3 (1.64.0) stable; urgency=medium * New Synapse release 1.64.0. 
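A side note on the two version strings this release commit touches: the Debian package says `1.65.0~rc1` while `pyproject.toml` says `1.65.0rc1`, because each ecosystem has its own rule for making a release candidate sort before the final release. In dpkg, `~` sorts before everything; in Python's version rules, `rcN` marks a pre-release. The Python side can be checked with the third-party `packaging` library:

```python
from packaging.version import Version

# Release candidates order before the final release...
assert Version("1.65.0rc1") < Version("1.65.0")
# ...and after the previous stable release.
assert Version("1.64.0") < Version("1.65.0rc1")
```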
diff --git a/pyproject.toml b/pyproject.toml index af7def0c53..ad2224f564 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.64.0" +version = "1.65.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From c962f87d6f597637b281bb5f256d5cfb084b8d25 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 9 Aug 2022 12:54:59 +0100 Subject: Tweak the changelog --- CHANGES.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1a28e3e375..b38c6e0910 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,23 +5,25 @@ Features -------- - Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273)) -- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in MSC3848. ([\#13343](https://github.com/matrix-org/synapse/issues/13343)) +- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343)) - Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370)) -- Add a module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428)) -- Add a module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429)) +- Add a new module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428)) +- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429)) - Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441)) Bugfixes -------- -- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353)) +- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages - Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13365](https://github.com/matrix-org/synapse/issues/13365)) - Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374)) - Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392)) - Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. 
([\#13408](https://github.com/matrix-org/synapse/issues/13408)) +- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353)) - Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413)) - Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432)) + hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470)) Updates to the Docker image @@ -69,8 +71,6 @@ Internal Changes - Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460)) - Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469)) - Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467)) -- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages - hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470)) Synapse 1.64.0 (2022-08-02) -- cgit 1.4.1 From 5ce2887653f9905f1ce13f53149c1e713314a28b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 9 Aug 2022 07:56:16 -0400 Subject: Strengthen tests about deleted old push actions. (#13471) --- changelog.d/13471.misc | 1 + tests/storage/test_event_push_actions.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 changelog.d/13471.misc diff --git a/changelog.d/13471.misc b/changelog.d/13471.misc new file mode 100644 index 0000000000..b55ff32c76 --- /dev/null +++ b/changelog.d/13471.misc @@ -0,0 +1 @@ +Clean-up tests for notifications. diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index ba40124c8a..62fd4aeb2f 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -135,7 +135,22 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): _assert_counts(1, 1, 0) # Delete old event push actions, this should not affect the (summarised) count. + # + # All event push actions are kept for 24 hours, so need to move forward + # in time. + self.pump(60 * 60 * 24) self.get_success(self.store._remove_old_push_actions_that_have_rotated()) + # Double check that the event push actions have been cleared (i.e. that + # any results *must* come from the summary). 
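+ # Note on the query below: keyvalues={"1": 1} amounts to the SQL filter
+ # "WHERE 1 = 1", which matches every row, i.e. it selects the whole table.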
+ result = self.get_success( + self.store.db_pool.simple_select_list( + table="event_push_actions", + keyvalues={"1": 1}, + retcols=("event_id",), + desc="", + ) + ) + self.assertEqual(result, []) _assert_counts(1, 1, 0) _mark_read(last_event_id) -- cgit 1.4.1 From 3d1b860f900e687593b152317b8bef22cfc1caae Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 9 Aug 2022 13:13:22 +0100 Subject: Fix changelog mistake --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b38c6e0910..823f4f40b3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,7 +15,7 @@ Features Bugfixes -------- -- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages +- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470)) - Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13365](https://github.com/matrix-org/synapse/issues/13365)) - Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374)) - Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392)) @@ -23,7 +23,6 @@ Bugfixes - Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353)) - Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413)) - Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432)) - hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. 
([\#13470](https://github.com/matrix-org/synapse/issues/13470)) Updates to the Docker image -- cgit 1.4.1 From 1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 9 Aug 2022 15:56:43 +0200 Subject: Use literals in place of `HTTPStatus` constants in tests (#13479) Replace - `HTTPStatus.NOT_FOUND` - `HTTPStatus.FORBIDDEN` - `HTTPStatus.UNAUTHORIZED` - `HTTPStatus.CONFLICT` - `HTTPStatus.CREATED` Signed-off-by: Dirk Klimpel --- changelog.d/13479.misc | 1 + tests/rest/admin/test_admin.py | 7 +- tests/rest/admin/test_background_updates.py | 4 +- tests/rest/admin/test_device.py | 28 ++++---- tests/rest/admin/test_event_reports.py | 16 ++--- tests/rest/admin/test_federation.py | 10 +-- tests/rest/admin/test_media.py | 32 ++++----- tests/rest/admin/test_registration_tokens.py | 26 +++---- tests/rest/admin/test_room.py | 40 +++++------ tests/rest/admin/test_server_notice.py | 8 +-- tests/rest/admin/test_statistics.py | 6 +- tests/rest/admin/test_user.py | 104 +++++++++++++-------------- 12 files changed, 141 insertions(+), 141 deletions(-) create mode 100644 changelog.d/13479.misc diff --git a/changelog.d/13479.misc b/changelog.d/13479.misc new file mode 100644 index 0000000000..315930deab --- /dev/null +++ b/changelog.d/13479.misc @@ -0,0 +1 @@ +Use literals in place of `HTTPStatus` constants in tests. \ No newline at end of file diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 06e74d5e58..a8f6436836 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -13,7 +13,6 @@ # limitations under the License. import urllib.parse -from http import HTTPStatus from parameterized import parameterized @@ -79,10 +78,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): # Should be quarantined self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=( - "Expected to receive a HTTPStatus.NOT_FOUND on accessing quarantined media: %s" + "Expected to receive a 404 on accessing quarantined media: %s" % server_and_media_id ), ) @@ -107,7 +106,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): # Expect a forbidden error self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg="Expected forbidden on quarantining media as a non-admin", ) diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index 8295ecf248..7cd8b52f02 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -51,7 +51,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): ) def test_requester_is_no_admin(self, method: str, url: str) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. 
""" self.register_user("user", "pass", admin=False) @@ -64,7 +64,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): access_token=other_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index 779f1bfac1..6a6e8ad7d8 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -58,7 +58,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): channel = self.make_request(method, self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -76,7 +76,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -85,7 +85,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): @parameterized.expand(["GET", "PUT", "DELETE"]) def test_user_does_not_exist(self, method: str) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ url = ( "/_synapse/admin/v2/users/@unknown_person:test/devices/%s" @@ -98,7 +98,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @parameterized.expand(["GET", "PUT", "DELETE"]) @@ -122,7 +122,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): def test_unknown_device(self) -> None: """ - Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or 200. + Tests that a lookup for a device that does not exist returns either 404 or 200. 
""" url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user @@ -134,7 +134,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) channel = self.make_request( @@ -312,7 +312,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -331,7 +331,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -339,7 +339,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): def test_user_does_not_exist(self) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ url = "/_synapse/admin/v2/users/@unknown_person:test/devices" channel = self.make_request( @@ -348,7 +348,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_user_is_not_local(self) -> None: @@ -438,7 +438,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -457,7 +457,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -465,7 +465,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): def test_user_does_not_exist(self) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ url = "/_synapse/admin/v2/users/@unknown_person:test/delete_devices" channel = self.make_request( @@ -474,7 +474,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_user_is_not_local(self) -> None: diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 9bc6ce62cb..c63a86d5b3 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -82,7 +82,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -90,7 +90,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): def test_requester_is_no_admin(self) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. 
""" channel = self.make_request( @@ -100,7 +100,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -467,7 +467,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -475,7 +475,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): def test_requester_is_no_admin(self) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. """ channel = self.make_request( @@ -485,7 +485,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -566,7 +566,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): def test_report_id_not_found(self) -> None: """ - Testing that a not existing `report_id` returns a HTTPStatus.NOT_FOUND. + Testing that a not existing `report_id` returns a 404. """ channel = self.make_request( @@ -576,7 +576,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=channel.json_body, ) diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index c3927c2735..8affd830d1 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -64,7 +64,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=other_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: @@ -117,7 +117,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # invalid destination @@ -127,7 +127,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_limit(self) -> None: @@ -561,7 +561,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=other_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: @@ -604,7 +604,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_limit(self) -> None: diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 92fd6c780d..d51c10a515 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -60,7 +60,7 @@ class 
DeleteMediaByIDTestCase(unittest.HomeserverTestCase): channel = self.make_request("DELETE", url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -82,7 +82,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -90,7 +90,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): def test_media_does_not_exist(self) -> None: """ - Tests that a lookup for a media that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a media that does not exist returns a 404 """ url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345") @@ -100,7 +100,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_media_is_not_local(self) -> None: @@ -188,10 +188,10 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=( - "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s" + "Expected to receive a 404 on accessing deleted media: %s" % server_and_media_id ), ) @@ -231,7 +231,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -251,7 +251,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -612,10 +612,10 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.assertTrue(os.path.exists(local_path)) else: self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=( - "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s" + "Expected to receive a 404 on accessing deleted media: %s" % (server_and_media_id) ), ) @@ -668,7 +668,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -689,7 +689,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -801,7 +801,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url % (action, self.media_id), b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -822,7 +822,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -894,7 +894,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -914,7 +914,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py index 544daaa4c8..bcb602382c 100644 --- 
a/tests/rest/admin/test_registration_tokens.py +++ b/tests/rest/admin/test_registration_tokens.py @@ -75,7 +75,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): """Try to create a token without authentication.""" channel = self.make_request("POST", self.url + "/new", {}) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -90,7 +90,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -390,7 +390,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, ) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -405,7 +405,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -421,7 +421,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=channel.json_body, ) @@ -606,7 +606,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, ) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -621,7 +621,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -637,7 +637,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=channel.json_body, ) @@ -667,7 +667,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, ) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -682,7 +682,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -698,7 +698,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.NOT_FOUND, + 404, channel.code, msg=channel.json_body, ) @@ -729,7 +729,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): """Try to list tokens without authentication.""" channel = self.make_request("GET", self.url, {}) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -744,7 +744,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6ea7858db7..8350f9025a 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -68,7 +68,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): def test_requester_is_no_admin(self) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. 
""" channel = self.make_request( @@ -78,7 +78,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_room_does_not_exist(self) -> None: @@ -319,7 +319,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): self.room_id, body="foo", tok=self.other_user_tok, - expect_code=HTTPStatus.FORBIDDEN, + expect_code=403, ) # Test that room is not purged @@ -398,7 +398,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): self._has_no_members(self.room_id) # Assert we can no longer peek into the room - self._assert_peek(self.room_id, expect_code=HTTPStatus.FORBIDDEN) + self._assert_peek(self.room_id, expect_code=403) def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" @@ -494,7 +494,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): ) def test_requester_is_no_admin(self, method: str, url: str) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. """ channel = self.make_request( @@ -504,7 +504,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_room_does_not_exist(self) -> None: @@ -696,7 +696,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_delete_same_room_twice(self) -> None: @@ -858,7 +858,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): self.room_id, body="foo", tok=self.other_user_tok, - expect_code=HTTPStatus.FORBIDDEN, + expect_code=403, ) # Test that room is not purged @@ -955,7 +955,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): self._has_no_members(self.room_id) # Assert we can no longer peek into the room - self._assert_peek(self.room_id, expect_code=HTTPStatus.FORBIDDEN) + self._assert_peek(self.room_id, expect_code=403) def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" @@ -1782,7 +1782,7 @@ class RoomTestCase(unittest.HomeserverTestCase): # delete the rooms and get joined roomed membership url = f"/_matrix/client/r0/rooms/{room_id}/joined_members" channel = self.make_request("GET", url.encode("ascii"), access_token=user_tok) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @@ -1811,7 +1811,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): def test_requester_is_no_admin(self) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. 
""" channel = self.make_request( @@ -1821,7 +1821,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.second_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: @@ -1841,7 +1841,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): def test_local_user_does_not_exist(self) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ channel = self.make_request( @@ -1851,7 +1851,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_remote_user(self) -> None: @@ -1874,7 +1874,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): def test_room_does_not_exist(self) -> None: """ - Check that unknown rooms/server return error HTTPStatus.NOT_FOUND. + Check that unknown rooms/server return error 404. """ url = "/_synapse/admin/v1/join/!unknown:test" @@ -1885,7 +1885,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual( "Can't join remote room because no servers that are in the room have been provided.", channel.json_body["error"], @@ -1952,7 +1952,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_join_private_room_if_member(self) -> None: @@ -2067,7 +2067,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): % (room_id, events[midway]["event_id"]), access_token=tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_context_as_admin(self) -> None: @@ -2277,7 +2277,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): @parameterized.expand([("PUT",), ("GET",)]) def test_requester_is_no_admin(self, method: str) -> None: - """If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.""" + """If the user is not a server admin, an error 403 is returned.""" channel = self.make_request( method, @@ -2286,7 +2286,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand([("PUT",), ("GET",)]) diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index bea3ac34d8..3a6b98fbb5 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -57,7 +57,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): channel = 
self.make_request("POST", self.url) self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -72,7 +72,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) @@ -80,7 +80,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): @override_config({"server_notices": {"system_mxid_localpart": "notices"}}) def test_user_does_not_exist(self) -> None: - """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND""" + """Tests that a lookup for a user that does not exist returns a 404""" channel = self.make_request( "POST", self.url, @@ -88,7 +88,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): content={"user_id": "@unknown_person:test", "content": ""}, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @override_config({"server_notices": {"system_mxid_localpart": "notices"}}) diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index baed27a815..3a8982afea 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -52,7 +52,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, b"{}") self.assertEqual( - HTTPStatus.UNAUTHORIZED, + 401, channel.code, msg=channel.json_body, ) @@ -60,7 +60,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): def test_requester_is_no_admin(self) -> None: """ - If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. + If the user is not a server admin, an error 403 is returned. 
""" channel = self.make_request( "GET", @@ -70,7 +70,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - HTTPStatus.FORBIDDEN, + 403, channel.code, msg=channel.json_body, ) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index c2b54b1ef7..beb1b1c120 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -142,7 +142,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual("HMAC incorrect", channel.json_body["error"]) def test_register_correct_nonce(self) -> None: @@ -375,7 +375,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.assertEqual("@bob3:test", channel.json_body["user_id"]) channel = self.make_request("GET", "/profile/@bob3:test/displayname") - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) # set displayname channel = self.make_request("GET", self.url) @@ -466,7 +466,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -478,7 +478,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, access_token=other_user_token) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_all_users(self) -> None: @@ -941,7 +941,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("POST", self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_not_admin(self) -> None: @@ -952,7 +952,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", url, access_token=self.other_user_token) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) channel = self.make_request( @@ -962,12 +962,12 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): content=b"{}", ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) def test_user_does_not_exist(self) -> None: """ - Tests that deactivation for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that deactivation for a user that does not exist returns a 404 """ channel = self.make_request( @@ -976,7 +976,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, 
msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_erase_is_not_bool(self) -> None: @@ -1220,7 +1220,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.other_user_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) channel = self.make_request( @@ -1230,12 +1230,12 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=b"{}", ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) def test_user_does_not_exist(self) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ channel = self.make_request( @@ -1244,7 +1244,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: @@ -1379,7 +1379,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -1434,7 +1434,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -1512,7 +1512,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content={"password": "abc123", "admin": False}, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertFalse(channel.json_body["admin"]) @@ -1550,7 +1550,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): ) # Admin user is not blocked by mau anymore - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertFalse(channel.json_body["admin"]) @@ -1585,7 +1585,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) @@ -1626,7 +1626,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + 
self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) @@ -1666,7 +1666,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content=body, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("msisdn", channel.json_body["threepids"][0]["medium"]) self.assertEqual("1234567890", channel.json_body["threepids"][0]["address"]) @@ -2064,7 +2064,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): ) # must fail - self.assertEqual(HTTPStatus.CONFLICT, channel.code, msg=channel.json_body) + self.assertEqual(409, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) self.assertEqual("External id is already in use.", channel.json_body["error"]) @@ -2261,7 +2261,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"deactivated": False, "password": "foo"}, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Reactivate the user without a password. @@ -2295,7 +2295,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"deactivated": False, "password": "foo"}, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Reactivate the user without a password. 
@@ -2407,7 +2407,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content={"password": "abc123"}, ) - self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("bob", channel.json_body["displayname"]) @@ -2520,7 +2520,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -2535,7 +2535,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): access_token=other_user_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_user_does_not_exist(self) -> None: @@ -2678,7 +2678,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -2693,12 +2693,12 @@ class PushersRestTestCase(unittest.HomeserverTestCase): access_token=other_user_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_user_does_not_exist(self) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ url = "/_synapse/admin/v1/users/@unknown_person:test/pushers" channel = self.make_request( @@ -2707,7 +2707,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_user_is_not_local(self) -> None: @@ -2808,7 +2808,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): """Try to list media of an user without authentication.""" channel = self.make_request(method, self.url, {}) - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) @@ -2822,12 +2822,12 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=other_user_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) def test_user_does_not_exist(self, method: str) -> None: - """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND""" + """Tests that a lookup for a user that does not exist returns a 404""" url = 
"/_synapse/admin/v1/users/@unknown_person:test/media" channel = self.make_request( method, @@ -2835,7 +2835,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) @@ -3393,7 +3393,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): """Try to login as a user without authentication.""" channel = self.make_request("POST", self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_not_admin(self) -> None: @@ -3402,7 +3402,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): "POST", self.url, b"{}", access_token=self.other_user_tok ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) def test_send_event(self) -> None: """Test that sending event as a user works.""" @@ -3447,7 +3447,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): # The puppet token should no longer work channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) # .. but the real user's tokens should still work channel = self.make_request( @@ -3480,7 +3480,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) def test_admin_logout_all(self) -> None: """Tests that the admin user calling `/logout/all` does expire the @@ -3501,7 +3501,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): # The puppet token should no longer work channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token) - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) # .. but the real user's tokens should still work channel = self.make_request( @@ -3538,7 +3538,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): room_id, "com.example.test", tok=self.other_user_tok, - expect_code=HTTPStatus.FORBIDDEN, + expect_code=403, ) # Login in as the user @@ -3559,7 +3559,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): room_id, user=self.other_user, tok=self.other_user_tok, - expect_code=HTTPStatus.FORBIDDEN, + expect_code=403, ) # Logging in as the other user and joining a room should work, even @@ -3594,7 +3594,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): Try to get information of an user without authentication. 
""" channel = self.make_request("GET", self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_not_admin(self) -> None: @@ -3609,7 +3609,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): self.url, access_token=other_user2_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_user_is_not_local(self) -> None: @@ -3680,7 +3680,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): Try to get information of an user without authentication. """ channel = self.make_request(method, self.url) - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["POST", "DELETE"]) @@ -3691,7 +3691,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): other_user_token = self.login("user", "pass") channel = self.make_request(method, self.url, access_token=other_user_token) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["POST", "DELETE"]) @@ -3762,7 +3762,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): """ channel = self.make_request(method, self.url, b"{}") - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "POST", "DELETE"]) @@ -3778,13 +3778,13 @@ class RateLimitTestCase(unittest.HomeserverTestCase): access_token=other_user_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "POST", "DELETE"]) def test_user_does_not_exist(self, method: str) -> None: """ - Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND + Tests that a lookup for a user that does not exist returns a 404 """ url = "/_synapse/admin/v1/users/@unknown_person:test/override_ratelimit" @@ -3794,7 +3794,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @parameterized.expand( @@ -3982,7 +3982,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): """Try to get information of a user without authentication.""" channel = self.make_request("GET", self.url, {}) - self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -3995,7 +3995,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): access_token=other_user_token, ) - self.assertEqual(HTTPStatus.FORBIDDEN, 
channel.code, msg=channel.json_body) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_user_does_not_exist(self) -> None: @@ -4008,7 +4008,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_user_is_not_local(self) -> None: -- cgit 1.4.1 From 1b09b0832ed56bfc994deadb3315755d0c20433b Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 9 Aug 2022 14:32:33 -0500 Subject: Allow use of both `@trace` and `@tag_args` stacked on the same function (#13453) ```py @trace @tag_args async def get_oldest_event_ids_with_depth_in_room(...) ... ``` Before this PR, you would see a warning in the logs and the span was not exported: ``` 2022-08-03 19:11:59,383 - synapse.logging.opentracing - 835 - ERROR - GET-0 - @trace may not have wrapped EventFederationWorkerStore.get_oldest_event_ids_with_depth_in_room correctly! The function is not async but returned a coroutine. ``` --- changelog.d/13453.misc | 1 + synapse/logging/opentracing.py | 158 ++++++++++++++++++++++++-------------- tests/logging/test_opentracing.py | 83 ++++++++++++++++++++ 3 files changed, 186 insertions(+), 56 deletions(-) create mode 100644 changelog.d/13453.misc diff --git a/changelog.d/13453.misc b/changelog.d/13453.misc new file mode 100644 index 0000000000..d30c5230c8 --- /dev/null +++ b/changelog.d/13453.misc @@ -0,0 +1 @@ +Allow use of both `@trace` and `@tag_args` stacked on the same function (tracing). diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index fa3f76c27f..d1fa2cf8ae 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -173,6 +173,7 @@ from typing import ( Any, Callable, Collection, + ContextManager, Dict, Generator, Iterable, @@ -823,75 +824,117 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte # Tracing decorators -def trace_with_opname(opname: str) -> Callable[[Callable[P, R]], Callable[P, R]]: +def _custom_sync_async_decorator( + func: Callable[P, R], + wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]], +) -> Callable[P, R]: """ - Decorator to trace a function with a custom opname. - - See the module's doc string for usage examples. + Decorates a function that is sync or async (coroutines), or that returns a Twisted + `Deferred`. The custom business logic of the decorator goes in `wrapping_logic`. + + Example usage: + ```py + # Decorator to time the function and log it out + def duration(func: Callable[P, R]) -> Callable[P, R]: + @contextlib.contextmanager + def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> Generator[None, None, None]: + start_ts = time.time() + try: + yield + finally: + end_ts = time.time() + duration = end_ts - start_ts + logger.info("%s took %s seconds", func.__name__, duration) + return _custom_sync_async_decorator(func, _wrapping_logic) + ``` + Args: + func: The function to be decorated + wrapping_logic: The business logic of your custom decorator. + This should be a ContextManager so you are able to run your logic + before/after the function as desired. 
""" - def decorator(func: Callable[P, R]) -> Callable[P, R]: - if opentracing is None: - return func # type: ignore[unreachable] + if inspect.iscoroutinefunction(func): - if inspect.iscoroutinefunction(func): + @wraps(func) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + with wrapping_logic(func, *args, **kwargs): + return await func(*args, **kwargs) # type: ignore[misc] - @wraps(func) - async def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R: - with start_active_span(opname): - return await func(*args, **kwargs) # type: ignore[misc] + else: + # The other case here handles both sync functions and those + # decorated with inlineDeferred. + @wraps(func) + def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + scope = wrapping_logic(func, *args, **kwargs) + scope.__enter__() - else: - # The other case here handles both sync functions and those - # decorated with inlineDeferred. - @wraps(func) - def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R: - scope = start_active_span(opname) - scope.__enter__() - - try: - result = func(*args, **kwargs) - if isinstance(result, defer.Deferred): - - def call_back(result: R) -> R: - scope.__exit__(None, None, None) - return result - - def err_back(result: R) -> R: - scope.__exit__(None, None, None) - return result - - result.addCallbacks(call_back, err_back) - - else: - if inspect.isawaitable(result): - logger.error( - "@trace may not have wrapped %s correctly! " - "The function is not async but returned a %s.", - func.__qualname__, - type(result).__name__, - ) + try: + result = func(*args, **kwargs) + if isinstance(result, defer.Deferred): + + def call_back(result: R) -> R: + scope.__exit__(None, None, None) + return result + def err_back(result: R) -> R: scope.__exit__(None, None, None) + return result + + result.addCallbacks(call_back, err_back) + + else: + if inspect.isawaitable(result): + logger.error( + "@trace may not have wrapped %s correctly! " + "The function is not async but returned a %s.", + func.__qualname__, + type(result).__name__, + ) + + scope.__exit__(None, None, None) - return result + return result - except Exception as e: - scope.__exit__(type(e), None, e.__traceback__) - raise + except Exception as e: + scope.__exit__(type(e), None, e.__traceback__) + raise - return _trace_inner # type: ignore[return-value] + return _wrapper # type: ignore[return-value] - return decorator + +def trace_with_opname( + opname: str, + *, + tracer: Optional["opentracing.Tracer"] = None, +) -> Callable[[Callable[P, R]], Callable[P, R]]: + """ + Decorator to trace a function with a custom opname. + See the module's doc string for usage examples. + """ + + # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 + @contextlib.contextmanager # type: ignore[arg-type] + def _wrapping_logic( + func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> Generator[None, None, None]: + with start_active_span(opname, tracer=tracer): + yield + + def _decorator(func: Callable[P, R]) -> Callable[P, R]: + if not opentracing: + return func + + return _custom_sync_async_decorator(func, _wrapping_logic) + + return _decorator def trace(func: Callable[P, R]) -> Callable[P, R]: """ Decorator to trace a function. - Sets the operation name to that of the function's name. - See the module's doc string for usage examples. """ @@ -900,7 +943,7 @@ def trace(func: Callable[P, R]) -> Callable[P, R]: def tag_args(func: Callable[P, R]) -> Callable[P, R]: """ - Tags all of the args to the active span. 
+ Decorator to tag all of the args to the active span. Args: func: `func` is assumed to be a method taking a `self` parameter, or a @@ -911,22 +954,25 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func - @wraps(func) - def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R: + # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 + @contextlib.contextmanager # type: ignore[arg-type] + def _wrapping_logic( + func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> Generator[None, None, None]: argspec = inspect.getfullargspec(func) # We use `[1:]` to skip the `self` object reference and `start=1` to # make the index line up with `argspec.args`. # - # FIXME: We could update this handle any type of function by ignoring the + # FIXME: We could update this to handle any type of function by ignoring the # first argument only if it's named `self` or `cls`. This isn't fool-proof # but handles the idiomatic cases. for i, arg in enumerate(args[1:], start=1): # type: ignore[index] set_tag("ARG_" + argspec.args[i], str(arg)) set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] set_tag("kwargs", str(kwargs)) - return func(*args, **kwargs) + yield - return _tag_args_inner + return _custom_sync_async_decorator(func, _wrapping_logic) @contextlib.contextmanager diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 3b14c76d7e..0917e478a5 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -25,6 +25,8 @@ from synapse.logging.context import ( from synapse.logging.opentracing import ( start_active_span, start_active_span_follows_from, + tag_args, + trace_with_opname, ) from synapse.util import Clock @@ -38,8 +40,12 @@ try: except ImportError: jaeger_client = None # type: ignore +import logging + from tests.unittest import TestCase +logger = logging.getLogger(__name__) + class LogContextScopeManagerTestCase(TestCase): """ @@ -194,3 +200,80 @@ class LogContextScopeManagerTestCase(TestCase): self._reporter.get_spans(), [scopes[1].span, scopes[2].span, scopes[0].span], ) + + def test_trace_decorator_sync(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with sync functions + """ + with LoggingContext("root context"): + + @trace_with_opname("fixture_sync_func", tracer=self._tracer) + @tag_args + def fixture_sync_func() -> str: + return "foo" + + result = fixture_sync_func() + self.assertEqual(result, "foo") + + # the span should have been reported + self.assertEqual( + [span.operation_name for span in self._reporter.get_spans()], + ["fixture_sync_func"], + ) + + def test_trace_decorator_deferred(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with functions that return deferreds + """ + reactor = MemoryReactorClock() + + with LoggingContext("root context"): + + @trace_with_opname("fixture_deferred_func", tracer=self._tracer) + @tag_args + def fixture_deferred_func() -> "defer.Deferred[str]": + d1: defer.Deferred[str] = defer.Deferred() + d1.callback("foo") + return d1 + + result_d1 = fixture_deferred_func() + + # let the tasks complete + reactor.pump((2,) * 8) + + self.assertEqual(self.successResultOf(result_d1), "foo") + + # the span should have been reported + self.assertEqual( + [span.operation_name for span in self._reporter.get_spans()], + ["fixture_deferred_func"], + ) + + def test_trace_decorator_async(self) -> None: + """ + Test whether we can use 
`@trace_with_opname` (`@trace`) and `@tag_args` + with async functions + """ + reactor = MemoryReactorClock() + + with LoggingContext("root context"): + + @trace_with_opname("fixture_async_func", tracer=self._tracer) + @tag_args + async def fixture_async_func() -> str: + return "foo" + + d1 = defer.ensureDeferred(fixture_async_func()) + + # let the tasks complete + reactor.pump((2,) * 8) + + self.assertEqual(self.successResultOf(d1), "foo") + + # the span should have been reported + self.assertEqual( + [span.operation_name for span in self._reporter.get_spans()], + ["fixture_async_func"], + ) -- cgit 1.4.1 From 51c01d450a5165060c8e17b506388a9b1808dda9 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 10 Aug 2022 12:58:20 +0100 Subject: Add some miscellaneous comments around sync (#13474) Add some miscellaneous comments to document sync, especially around `compute_state_delta`. Signed-off-by: Sean Quah Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/13474.misc | 1 + synapse/handlers/sync.py | 116 +++++++++++++++++++++++++++++++---------------- synapse/visibility.py | 4 +- 3 files changed, 81 insertions(+), 40 deletions(-) create mode 100644 changelog.d/13474.misc diff --git a/changelog.d/13474.misc b/changelog.d/13474.misc new file mode 100644 index 0000000000..d34c661fed --- /dev/null +++ b/changelog.d/13474.misc @@ -0,0 +1 @@ +Add some miscellaneous comments to document sync, especially around `compute_state_delta`. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index d827c03ad1..3ca01391c9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -13,7 +13,17 @@ # limitations under the License. import itertools import logging -from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + List, + Optional, + Sequence, + Set, + Tuple, +) import attr from prometheus_client import Counter @@ -89,7 +99,7 @@ class SyncConfig: @attr.s(slots=True, frozen=True, auto_attribs=True) class TimelineBatch: prev_batch: StreamToken - events: List[EventBase] + events: Sequence[EventBase] limited: bool # A mapping of event ID to the bundled aggregations for the above events. # This is only calculated if limited is true. @@ -852,16 +862,26 @@ class SyncHandler: now_token: StreamToken, full_state: bool, ) -> MutableStateMap[EventBase]: - """Works out the difference in state between the start of the timeline - and the previous sync. + """Works out the difference in state between the end of the previous sync and + the start of the timeline. Args: room_id: batch: The timeline batch for the room that will be sent to the user. sync_config: - since_token: Token of the end of the previous batch. May be None. + since_token: Token of the end of the previous batch. May be `None`. now_token: Token of the end of the current batch. full_state: Whether to force returning the full state. + `lazy_load_members` still applies when `full_state` is `True`. + + Returns: + The state to return in the sync response for the room. + + Clients will overlay this onto the state at the end of the previous sync to + arrive at the state at the start of the timeline. + + Clients will then overlay state events in the timeline to arrive at the + state at the end of the timeline, in preparation for the next sync. 
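That overlay model reduces, in the `_calculate_state` helper rewritten further down in this patch, to set arithmetic over event IDs. A toy run with hypothetical events — a room rename the client has not yet seen, plus a topic change that is already visible in the timeline:

```py
# State maps are (event_type, state_key) -> event_id; all IDs hypothetical.
timeline_start = {("m.room.name", ""): "$name_v2"}
timeline_end = {
    ("m.room.name", ""): "$name_v2",
    ("m.room.topic", ""): "$topic_v1",
}
timeline_contains = {("m.room.topic", ""): "$topic_v1"}    # sent in the timeline itself
previous_timeline_end = {("m.room.name", ""): "$name_v1"}  # state as of the last sync

state_ids = (
    set(timeline_end.values()) | set(timeline_start.values())
) - set(previous_timeline_end.values()) - set(timeline_contains.values())

print(state_ids)  # {'$name_v2'}: new to the client and not already in the timeline
```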
""" # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state @@ -869,7 +889,8 @@ class SyncHandler: # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): - + # The memberships needed for events in the timeline. + # Only calculated when `lazy_load_members` is on. members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() @@ -897,38 +918,46 @@ class SyncHandler: else: state_filter = StateFilter.all() + # The contribution to the room state from state events in the timeline. + # Only contains the last event for any given state key. timeline_state = { (event.type, event.state_key): event.event_id for event in batch.events if event.is_state() } + # Now calculate the state to return in the sync response for the room. + # This is more or less the change in state between the end of the previous + # sync's timeline and the start of the current sync's timeline. + # See the docstring above for details. + state_ids: StateMap[str] + if full_state: if batch: - current_state_ids = ( + state_at_timeline_end = ( await self._state_storage_controller.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) ) - state_ids = ( + state_at_timeline_start = ( await self._state_storage_controller.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) ) else: - current_state_ids = await self.get_state_at( + state_at_timeline_end = await self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) - state_ids = current_state_ids + state_at_timeline_start = state_at_timeline_end state_ids = _calculate_state( timeline_contains=timeline_state, - timeline_start=state_ids, - previous={}, - current=current_state_ids, + timeline_start=state_at_timeline_start, + timeline_end=state_at_timeline_end, + previous_timeline_end={}, lazy_load_members=lazy_load_members, ) elif batch.limited: @@ -968,24 +997,23 @@ class SyncHandler: ) if batch: - current_state_ids = ( + state_at_timeline_end = ( await self._state_storage_controller.get_state_ids_for_event( batch.events[-1].event_id, state_filter=state_filter ) ) else: - # Its not clear how we get here, but empirically we do - # (#5407). Logging has been added elsewhere to try and - # figure out where this state comes from. - current_state_ids = await self.get_state_at( + # We can get here if the user has ignored the senders of all + # the recent events. + state_at_timeline_end = await self.get_state_at( room_id, stream_position=now_token, state_filter=state_filter ) state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, - previous=state_at_previous_sync, - current=current_state_ids, + timeline_end=state_at_timeline_end, + previous_timeline_end=state_at_previous_sync, # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) @@ -1010,6 +1038,13 @@ class SyncHandler: ), ) + # At this point, if `lazy_load_members` is enabled, `state_ids` includes + # the memberships of all event senders in the timeline. This is because we + # may not have sent the memberships in a previous sync. + + # When `include_redundant_members` is on, we send all the lazy-loaded + # memberships of event senders. Otherwise we make an effort to limit the set + # of memberships we send to those that we have not already sent to this client. 
if lazy_load_members and not include_redundant_members: cache_key = (sync_config.user.to_string(), sync_config.device_id) cache = self.get_lazy_loaded_members_cache(cache_key) @@ -2216,8 +2251,8 @@ def _action_has_highlight(actions: List[JsonDict]) -> bool: def _calculate_state( timeline_contains: StateMap[str], timeline_start: StateMap[str], - previous: StateMap[str], - current: StateMap[str], + timeline_end: StateMap[str], + previous_timeline_end: StateMap[str], lazy_load_members: bool, ) -> StateMap[str]: """Works out what state to include in a sync response. @@ -2225,45 +2260,50 @@ def _calculate_state( Args: timeline_contains: state in the timeline timeline_start: state at the start of the timeline - previous: state at the end of the previous sync (or empty dict + timeline_end: state at the end of the timeline + previous_timeline_end: state at the end of the previous sync (or empty dict if this is an initial sync) - current: state at the end of the timeline lazy_load_members: whether to return members from timeline_start or not. assumes that timeline_start has already been filtered to include only the members the client needs to know about. """ - event_id_to_key = { - e: key - for key, e in itertools.chain( + event_id_to_state_key = { + event_id: state_key + for state_key, event_id in itertools.chain( timeline_contains.items(), - previous.items(), timeline_start.items(), - current.items(), + timeline_end.items(), + previous_timeline_end.items(), ) } - c_ids = set(current.values()) - ts_ids = set(timeline_start.values()) - p_ids = set(previous.values()) - tc_ids = set(timeline_contains.values()) + timeline_end_ids = set(timeline_end.values()) + timeline_start_ids = set(timeline_start.values()) + previous_timeline_end_ids = set(previous_timeline_end.values()) + timeline_contains_ids = set(timeline_contains.values()) # If we are lazyloading room members, we explicitly add the membership events # for the senders in the timeline into the state block returned by /sync, # as we may not have sent them to the client before. We find these membership # events by filtering them out of timeline_start, which has already been filtered # to only include membership events for the senders in the timeline. - # In practice, we can do this by removing them from the p_ids list, - # which is the list of relevant state we know we have already sent to the client. + # In practice, we can do this by removing them from the previous_timeline_end_ids + # list, which is the list of relevant state we know we have already sent to the + # client. 
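    # (Worked example with a made-up event ID: suppose timeline_start contains a
    # member event $m for one of the timeline senders, and previous_timeline_end
    # contains $m too. Without the removal below, $m would be subtracted out of
    #     (timeline_end_ids | timeline_start_ids)
    #         - previous_timeline_end_ids - timeline_contains_ids
    # and never sent; dropping $m from previous_timeline_end_ids keeps it in the
    # result, so the client is guaranteed to receive the lazy-loaded membership.)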
# see https://github.com/matrix-org/synapse/pull/2970/files/efcdacad7d1b7f52f879179701c7e0d9b763511f#r204732809 if lazy_load_members: - p_ids.difference_update( + previous_timeline_end_ids.difference_update( e for t, e in timeline_start.items() if t[0] == EventTypes.Member ) - state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids + state_ids = ( + (timeline_end_ids | timeline_start_ids) + - previous_timeline_end_ids + - timeline_contains_ids + ) - return {event_id_to_key[e]: e for e in state_ids} + return {event_id_to_state_key[e]: e for e in state_ids} @attr.s(slots=True, auto_attribs=True) diff --git a/synapse/visibility.py b/synapse/visibility.py index d947edde66..c810a05907 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -73,8 +73,8 @@ async def filter_events_for_client( * the user is not currently a member of the room, and: * the user has not been a member of the room since the given events - always_include_ids: set of event ids to specifically - include (unless sender is ignored) + always_include_ids: set of event ids to specifically include, if present + in events (unless sender is ignored) filter_send_to_client: Whether we're checking an event that's going to be sent to a client. This might not always be the case since this function can also be called to check whether a user can see the state at a given point. -- cgit 1.4.1 From 2281427175e4c93a30c39607fb4ac23c2a1f399f Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 10 Aug 2022 20:01:12 +0200 Subject: Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style --- changelog.d/13488.misc | 1 + tests/rest/admin/test_background_updates.py | 7 +- tests/rest/admin/test_device.py | 15 ++- tests/rest/admin/test_event_reports.py | 75 +++--------- tests/rest/admin/test_federation.py | 17 ++- tests/rest/admin/test_media.py | 99 +++------------- tests/rest/admin/test_registration_tokens.py | 167 +++++---------------------- tests/rest/admin/test_room.py | 49 ++++---- tests/rest/admin/test_server_notice.py | 15 ++- tests/rest/admin/test_statistics.py | 61 ++-------- tests/rest/admin/test_user.py | 107 +++++++++-------- tests/rest/admin/test_username_available.py | 11 +- 12 files changed, 177 insertions(+), 447 deletions(-) create mode 100644 changelog.d/13488.misc diff --git a/changelog.d/13488.misc b/changelog.d/13488.misc new file mode 100644 index 0000000000..315930deab --- /dev/null +++ b/changelog.d/13488.misc @@ -0,0 +1 @@ +Use literals in place of `HTTPStatus` constants in tests. \ No newline at end of file diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index 7cd8b52f02..d507a3af8d 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
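# (The mechanical pattern applied throughout this commit, shown schematically
# with placeholder arguments:
#     self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
# becomes
#     self.assertEqual(400, channel.code, msg=channel.json_body)
# which keeps the expected wire status visible at each assertion site.)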
-from http import HTTPStatus from typing import Collection from parameterized import parameterized @@ -81,7 +80,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) # job_name invalid @@ -92,7 +91,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) def _register_bg_update(self) -> None: @@ -365,4 +364,4 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index 6a6e8ad7d8..d52aee8f92 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import urllib.parse -from http import HTTPStatus from parameterized import parameterized @@ -104,7 +103,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): @parameterized.expand(["GET", "PUT", "DELETE"]) def test_user_is_not_local(self, method: str) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ url = ( "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices/%s" @@ -117,7 +116,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) def test_unknown_device(self) -> None: @@ -179,7 +178,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): content=update, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"]) # Ensure the display name was not updated. 
@@ -353,7 +352,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): def test_user_is_not_local(self) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices" @@ -363,7 +362,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) def test_user_has_no_devices(self) -> None: @@ -479,7 +478,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): def test_user_is_not_local(self) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/delete_devices" @@ -489,7 +488,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) def test_unknown_devices(self) -> None: diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index c63a86d5b3..fbc490f46d 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
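# (For orientation: the admin API error bodies these assertions inspect have the
# shape
#     {"errcode": "M_INVALID_PARAM", "error": "Unknown direction: bar"}
# and constants such as Codes.INVALID_PARAM are just the corresponding "M_*"
# strings, so checking channel.json_body["errcode"] pins the wire-level code
# independently of the HTTP status.)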
-from http import HTTPStatus from typing import List from twisted.test.proto_helpers import MemoryReactor @@ -81,11 +80,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -99,11 +94,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_default_success(self) -> None: @@ -278,7 +269,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): def test_invalid_search_order(self) -> None: """ - Testing that a invalid search order returns a HTTPStatus.BAD_REQUEST + Testing that a invalid search order returns a 400 """ channel = self.make_request( @@ -287,17 +278,13 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual("Unknown direction: bar", channel.json_body["error"]) def test_limit_is_negative(self) -> None: """ - Testing that a negative limit parameter returns a HTTPStatus.BAD_REQUEST + Testing that a negative limit parameter returns a 400 """ channel = self.make_request( @@ -306,16 +293,12 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) def test_from_is_negative(self) -> None: """ - Testing that a negative from parameter returns a HTTPStatus.BAD_REQUEST + Testing that a negative from parameter returns a 400 """ channel = self.make_request( @@ -324,11 +307,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) def test_next_token(self) -> None: @@ -466,11 +445,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -484,11 +459,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_default_success(self) -> None: @@ -507,7 +478,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): def test_invalid_report_id(self) -> None: """ - Testing that an invalid `report_id` 
returns a HTTPStatus.BAD_REQUEST. + Testing that an invalid `report_id` returns a 400. """ # `report_id` is negative @@ -517,11 +488,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", @@ -535,11 +502,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", @@ -553,11 +516,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", @@ -575,11 +534,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - 404, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) self.assertEqual("Event report not found", channel.json_body["error"]) diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index 8affd830d1..4c7864c629 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from http import HTTPStatus from typing import List, Optional from parameterized import parameterized @@ -77,7 +76,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -87,7 +86,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # unkown order_by @@ -97,7 +96,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid search order @@ -107,7 +106,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid destination @@ -469,7 +468,7 @@ class FederationTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "The retry timing does not need to be reset for this destination.", channel.json_body["error"], @@ -574,7 +573,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -584,7 +583,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid search order @@ -594,7 +593,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid destination diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index d51c10a515..aadb31ca83 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import os -from http import HTTPStatus from parameterized import parameterized @@ -81,11 +80,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.other_user_token, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_media_does_not_exist(self) -> None: @@ -105,7 +100,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): def test_media_is_not_local(self) -> None: """ - Tests that a lookup for a media that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a media that is not a local returns a 400 """ url = "/_synapse/admin/v1/media/%s/%s" % ("unknown_domain", "12345") @@ -115,7 +110,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only delete local media", channel.json_body["error"]) def test_delete_media(self) -> None: @@ -230,11 +225,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url, b"{}") - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -250,16 +241,12 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.other_user_token, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_media_is_not_local(self) -> None: """ - Tests that a lookup for media that is not local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for media that is not local returns a 400 """ url = "/_synapse/admin/v1/media/%s/delete" % "unknown_domain" @@ -269,7 +256,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only delete local media", channel.json_body["error"]) def test_missing_parameter(self) -> None: @@ -282,11 +269,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) self.assertEqual( "Missing integer query parameter 'before_ts'", channel.json_body["error"] @@ -302,11 +285,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Query parameter before_ts must be a positive integer.", @@ -319,11 +298,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - 
msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Query parameter before_ts you provided is from the year 1970. " @@ -337,11 +312,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Query parameter size_gt must be a string representing a positive integer.", @@ -354,11 +325,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Boolean query parameter 'keep_profiles' must be one of ['true', 'false']", @@ -667,11 +634,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): b"{}", ) - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["quarantine", "unquarantine"]) @@ -688,11 +651,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.other_user_token, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_quarantine_media(self) -> None: @@ -800,11 +759,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url % (action, self.media_id), b"{}") - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["protect", "unprotect"]) @@ -821,11 +776,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): access_token=self.other_user_token, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_protect_media(self) -> None: @@ -913,11 +864,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): access_token=self.other_user_token, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: @@ -930,11 +877,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Query parameter before_ts must be a positive integer.", @@ -947,11 +890,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - 
msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Query parameter before_ts you provided is from the year 1970. " diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py index bcb602382c..8f8abc21c7 100644 --- a/tests/rest/admin/test_registration_tokens.py +++ b/tests/rest/admin/test_registration_tokens.py @@ -13,7 +13,6 @@ # limitations under the License. import random import string -from http import HTTPStatus from typing import Optional from twisted.test.proto_helpers import MemoryReactor @@ -74,11 +73,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): def test_create_no_auth(self) -> None: """Try to create a token without authentication.""" channel = self.make_request("POST", self.url + "/new", {}) - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_create_requester_not_admin(self) -> None: @@ -89,11 +84,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_create_using_defaults(self) -> None: @@ -168,11 +159,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) def test_create_token_invalid_chars(self) -> None: @@ -188,11 +175,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) def test_create_token_already_exists(self) -> None: @@ -215,7 +198,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): data, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel2.code, msg=channel2.json_body) + self.assertEqual(400, channel2.code, msg=channel2.json_body) self.assertEqual(channel2.json_body["errcode"], Codes.INVALID_PARAM) def test_create_unable_to_generate_token(self) -> None: @@ -262,7 +245,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) self.assertEqual( - HTTPStatus.BAD_REQUEST, + 400, channel.code, msg=channel.json_body, ) @@ -275,11 +258,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": 1.5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) def test_create_expiry_time(self) -> None: @@ -291,11 +270,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"expiry_time": self.clock.time_msec() - 10000}, access_token=self.admin_user_tok, ) - self.assertEqual( - 
HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # Should fail with float @@ -305,11 +280,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"expiry_time": self.clock.time_msec() + 1000000.5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) def test_create_length(self) -> None: @@ -331,11 +302,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"length": 0}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # Should fail with a negative integer @@ -345,11 +312,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"length": -5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # Should fail with a float @@ -359,11 +322,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"length": 8.5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # Should fail with 65 @@ -373,11 +332,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"length": 65}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # UPDATING @@ -389,11 +344,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): self.url + "/1234", # Token doesn't exist but that doesn't matter {}, ) - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_update_requester_not_admin(self) -> None: @@ -404,11 +355,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_update_non_existent(self) -> None: @@ -420,11 +367,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - 404, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) def test_update_uses_allowed(self) -> None: @@ -472,11 +415,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": 1.5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - 
channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # Should fail with a negative integer @@ -486,11 +425,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"uses_allowed": -5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) def test_update_expiry_time(self) -> None: @@ -529,11 +464,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"expiry_time": past_time}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # Should fail a float @@ -543,11 +474,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {"expiry_time": new_expiry_time + 0.5}, access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) def test_update_both(self) -> None: @@ -589,11 +516,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) # DELETING @@ -605,11 +528,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): self.url + "/1234", # Token doesn't exist but that doesn't matter {}, ) - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_delete_requester_not_admin(self) -> None: @@ -620,11 +539,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_delete_non_existent(self) -> None: @@ -636,11 +551,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - 404, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) def test_delete(self) -> None: @@ -666,11 +577,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): self.url + "/1234", # Token doesn't exist but that doesn't matter {}, ) - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_get_requester_not_admin(self) -> None: @@ -697,11 +604,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - 404, - channel.code, - msg=channel.json_body, - ) + 
self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) def test_get(self) -> None: @@ -728,11 +631,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): def test_list_no_auth(self) -> None: """Try to list tokens without authentication.""" channel = self.make_request("GET", self.url, {}) - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_list_requester_not_admin(self) -> None: @@ -743,11 +642,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): {}, access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_list_all(self) -> None: @@ -780,11 +675,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) def _test_list_query_parameter(self, valid: str) -> None: """Helper used to test both valid=true and valid=false.""" diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 8350f9025a..dd5000679a 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import urllib.parse -from http import HTTPStatus from typing import List, Optional from unittest.mock import Mock @@ -98,7 +97,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): def test_room_is_not_valid(self) -> None: """ - Check that invalid room names, return an error HTTPStatus.BAD_REQUEST. + Check that invalid room names, return an error 400. 
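        (A legal room ID has the form `!opaque_id:server.name`; a bare string such
        as `invalidroom` has neither the sigil nor a server name, hence the 400.)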
""" url = "/_synapse/admin/v1/rooms/%s" % "invalidroom" @@ -109,7 +108,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "invalidroom is not a legal room ID", channel.json_body["error"], @@ -145,7 +144,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "User must be our own: @not:exist.bla", channel.json_body["error"], @@ -163,7 +162,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) def test_purge_is_not_bool(self) -> None: @@ -178,7 +177,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) def test_purge_room_and_block(self) -> None: @@ -546,7 +545,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): ) def test_room_is_not_valid(self, method: str, url: str) -> None: """ - Check that invalid room names, return an error HTTPStatus.BAD_REQUEST. + Check that invalid room names, return an error 400. """ channel = self.make_request( @@ -556,7 +555,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "invalidroom is not a legal room ID", channel.json_body["error"], @@ -592,7 +591,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "User must be our own: @not:exist.bla", channel.json_body["error"], @@ -610,7 +609,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) def test_purge_is_not_bool(self) -> None: @@ -625,7 +624,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) def test_delete_expired_status(self) -> None: @@ -722,9 +721,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, second_channel.code, msg=second_channel.json_body - ) + self.assertEqual(400, second_channel.code, msg=second_channel.json_body) self.assertEqual(Codes.UNKNOWN, second_channel.json_body["errcode"]) self.assertEqual( f"History purge already 
in progress for {self.room_id}", @@ -1546,7 +1543,7 @@ class RoomTestCase(unittest.HomeserverTestCase): _search_test(None, "foo") _search_test(None, "bar") - _search_test(None, "", expected_http_code=HTTPStatus.BAD_REQUEST) + _search_test(None, "", expected_http_code=400) # Test that the whole room id returns the room _search_test(room_id_1, room_id_1) @@ -1836,7 +1833,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) def test_local_user_does_not_exist(self) -> None: @@ -1866,7 +1863,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "This endpoint can only be used with local users", channel.json_body["error"], @@ -1893,7 +1890,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): def test_room_is_not_valid(self) -> None: """ - Check that invalid room names, return an error HTTPStatus.BAD_REQUEST. + Check that invalid room names, return an error 400. """ url = "/_synapse/admin/v1/join/invalidroom" @@ -1904,7 +1901,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "invalidroom was not legal room ID or room alias", channel.json_body["error"], @@ -2243,11 +2240,11 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - # We expect this to fail with a HTTPStatus.BAD_REQUEST as there are no room admins. + # We expect this to fail with a 400 as there are no room admins. 
# # (Note we assert the error message to ensure that it's not denied for # some other reason) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body["error"], "No local admin user in room with power to update power levels.", @@ -2291,7 +2288,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): @parameterized.expand([("PUT",), ("GET",)]) def test_room_is_not_valid(self, method: str) -> None: - """Check that invalid room names, return an error HTTPStatus.BAD_REQUEST.""" + """Check that invalid room names, return an error 400.""" channel = self.make_request( method, @@ -2300,7 +2297,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "invalidroom is not a legal room ID", channel.json_body["error"], @@ -2317,7 +2314,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) # `block` is not set @@ -2328,7 +2325,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) # no content is send @@ -2338,7 +2335,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_JSON, channel.json_body["errcode"]) def test_block_room(self) -> None: diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index 3a6b98fbb5..81e125e27d 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from http import HTTPStatus from typing import List from twisted.test.proto_helpers import MemoryReactor @@ -94,7 +93,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): @override_config({"server_notices": {"system_mxid_localpart": "notices"}}) def test_user_is_not_local(self) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ channel = self.make_request( "POST", @@ -106,7 +105,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): }, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "Server notices can only be sent to local users", channel.json_body["error"] ) @@ -122,7 +121,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_JSON, channel.json_body["errcode"]) # no content @@ -133,7 +132,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): content={"user_id": self.other_user}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) # no body @@ -144,7 +143,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): content={"user_id": self.other_user, "content": ""}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) self.assertEqual("'body' not in content", channel.json_body["error"]) @@ -156,7 +155,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): content={"user_id": self.other_user, "content": {"body": ""}}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) self.assertEqual("'msgtype' not in content", channel.json_body["error"]) @@ -172,7 +171,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): }, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) self.assertEqual( "Server notices are not enabled on this server", channel.json_body["error"] diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index 3a8982afea..b60f16b914 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from http import HTTPStatus from typing import List, Optional from twisted.test.proto_helpers import MemoryReactor @@ -51,11 +50,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual( - 401, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self) -> None: @@ -69,11 +64,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.other_user_tok, ) - self.assertEqual( - 403, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: @@ -87,11 +78,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -101,11 +88,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative limit @@ -115,11 +98,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from_ts @@ -129,11 +108,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative until_ts @@ -143,11 +118,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # until_ts smaller from_ts @@ -157,11 +128,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # empty search term @@ -171,11 +138,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid search order @@ -185,11 +148,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - 
channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) def test_limit(self) -> None: diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index beb1b1c120..411e4ec005 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -17,7 +17,6 @@ import hmac import os import urllib.parse from binascii import unhexlify -from http import HTTPStatus from typing import List, Optional from unittest.mock import Mock, patch @@ -79,7 +78,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url, b"{}") - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "Shared secret registration is not enabled", channel.json_body["error"] ) @@ -111,7 +110,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): body = {"nonce": nonce} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("username must be specified", channel.json_body["error"]) # 61 seconds @@ -119,7 +118,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("unrecognised nonce", channel.json_body["error"]) def test_register_incorrect_nonce(self) -> None: @@ -198,7 +197,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Now, try and reuse it channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("unrecognised nonce", channel.json_body["error"]) def test_missing_parts(self) -> None: @@ -219,7 +218,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be an empty body present channel = self.make_request("POST", self.url, {}) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("nonce must be specified", channel.json_body["error"]) # @@ -229,28 +228,28 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Must be present channel = self.make_request("POST", self.url, {"nonce": nonce()}) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("username must be specified", channel.json_body["error"]) # Must be a string body = {"nonce": nonce(), "username": 1234} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid username", channel.json_body["error"]) # Must not have null bytes body = {"nonce": nonce(), "username": "abcd\u0000"} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid username", channel.json_body["error"]) # Must not have null bytes body = 
{"nonce": nonce(), "username": "a" * 1000} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid username", channel.json_body["error"]) # @@ -261,28 +260,28 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): body = {"nonce": nonce(), "username": "a"} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("password must be specified", channel.json_body["error"]) # Must be a string body = {"nonce": nonce(), "username": "a", "password": 1234} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid password", channel.json_body["error"]) # Must not have null bytes body = {"nonce": nonce(), "username": "a", "password": "abcd\u0000"} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid password", channel.json_body["error"]) # Super long body = {"nonce": nonce(), "username": "a", "password": "A" * 1000} channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid password", channel.json_body["error"]) # @@ -298,7 +297,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): } channel = self.make_request("POST", self.url, body) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid user type", channel.json_body["error"]) def test_displayname(self) -> None: @@ -591,7 +590,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -601,7 +600,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid guests @@ -611,7 +610,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid deactivated @@ -621,7 +620,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # unknown order_by @@ -631,7 +630,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid search order @@ -641,7 +640,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) def test_limit(self) -> None: @@ -991,18 +990,18 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) def test_user_is_not_local(self) -> None: """ - Tests that deactivation for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that deactivation for a user that is not a local returns a 400 """ url = "/_synapse/admin/v1/deactivate/@unknown_person:unknown_domain" channel = self.make_request("POST", url, access_token=self.admin_user_tok) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only deactivate local users", channel.json_body["error"]) def test_deactivate_user_erase_true(self) -> None: @@ -1259,7 +1258,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"admin": "not_bool"}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) # deactivated not bool @@ -1269,7 +1268,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"deactivated": "not_bool"}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # password not str @@ -1279,7 +1278,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"password": True}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # password not length @@ -1289,7 +1288,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"password": "x" * 513}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # user_type not valid @@ -1299,7 +1298,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"user_type": "new type"}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # external_ids not valid @@ -1311,7 +1310,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): "external_ids": {"auth_provider": "prov", "wrong_external_id": 
"id"} }, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) channel = self.make_request( @@ -1320,7 +1319,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"external_ids": {"external_id": "id"}}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) # threepids not valid @@ -1330,7 +1329,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"threepids": {"medium": "email", "wrong_address": "id"}}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) channel = self.make_request( @@ -1339,7 +1338,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"threepids": {"address": "value"}}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) def test_get_user(self) -> None: @@ -2228,7 +2227,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, content={"deactivated": False}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) # Reactivate the user. channel = self.make_request( @@ -2431,7 +2430,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): content={"password": "abc123", "deactivated": "false"}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) # Check user is not deactivated channel = self.make_request( @@ -2712,7 +2711,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): def test_user_is_not_local(self) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/pushers" @@ -2722,7 +2721,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only look up local users", channel.json_body["error"]) def test_get_pushers(self) -> None: @@ -2840,7 +2839,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): @parameterized.expand(["GET", "DELETE"]) def test_user_is_not_local(self, method: str) -> None: - """Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST""" + """Tests that a lookup for a user that is not a local returns a 400""" url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media" channel = self.make_request( @@ -2849,7 +2848,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) 
self.assertEqual("Can only look up local users", channel.json_body["error"]) def test_limit_GET(self) -> None: @@ -2970,7 +2969,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid search order @@ -2980,7 +2979,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative limit @@ -2990,7 +2989,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -3000,7 +2999,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) def test_next_token(self) -> None: @@ -3614,7 +3613,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): def test_user_is_not_local(self) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ url = self.url_prefix % "@unknown_person:unknown_domain" # type: ignore[attr-defined] @@ -3623,7 +3622,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): url, access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only whois a local user", channel.json_body["error"]) def test_get_whois_admin(self) -> None: @@ -3697,12 +3696,12 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): @parameterized.expand(["POST", "DELETE"]) def test_user_is_not_local(self, method: str) -> None: """ - Tests that shadow-banning for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that shadow-banning for a user that is not a local returns a 400 """ url = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain" channel = self.make_request(method, url, access_token=self.admin_user_tok) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) def test_success(self) -> None: """ @@ -3806,7 +3805,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): ) def test_user_is_not_local(self, method: str, error_msg: str) -> None: """ - Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST + Tests that a lookup for a user that is not a local returns a 400 """ url = ( "/_synapse/admin/v1/users/@unknown_person:unknown_domain/override_ratelimit" @@ -3818,7 +3817,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, 
msg=channel.json_body) self.assertEqual(error_msg, channel.json_body["error"]) def test_invalid_parameter(self) -> None: @@ -3833,7 +3832,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): content={"messages_per_second": "string"}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # messages_per_second is negative @@ -3844,7 +3843,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): content={"messages_per_second": -1}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # burst_count is a string @@ -3855,7 +3854,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): content={"burst_count": "string"}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # burst_count is negative @@ -3866,7 +3865,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): content={"burst_count": -1}, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) def test_return_zero_when_null(self) -> None: @@ -4021,7 +4020,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only look up local users", channel.json_body["error"]) def test_success(self) -> None: diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index b5e7eecf87..30f12f1bff 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -11,9 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from http import HTTPStatus - from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -40,7 +37,7 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): if username == "allowed": return True raise SynapseError( - HTTPStatus.BAD_REQUEST, + 400, "User ID already taken.", errcode=Codes.USER_IN_USE, ) @@ -67,10 +64,6 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): url = "%s?username=%s" % (self.url, "disallowed") channel = self.make_request("GET", url, access_token=self.admin_user_tok) - self.assertEqual( - HTTPStatus.BAD_REQUEST, - channel.code, - msg=channel.json_body, - ) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["errcode"], "M_USER_IN_USE") self.assertEqual(channel.json_body["error"], "User ID already taken.") -- cgit 1.4.1 From 12abd724974a2311d5311272d26d2f8aa11734a9 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 11 Aug 2022 10:27:48 +0000 Subject: Revert 'Remove the unspecced field in the response. (#13365)' to give more time for clients to update. 
(#13501) --- changelog.d/13501.misc | 1 + synapse/handlers/room_summary.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/13501.misc diff --git a/changelog.d/13501.misc b/changelog.d/13501.misc new file mode 100644 index 0000000000..002b91960c --- /dev/null +++ b/changelog.d/13501.misc @@ -0,0 +1 @@ +Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. (#13365)' to give more time for clients to update. diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index ebd445adca..732b0310bc 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -453,6 +453,7 @@ class RoomSummaryHandler: "type": e.type, "state_key": e.state_key, "content": e.content, + "room_id": e.room_id, "sender": e.sender, "origin_server_ts": e.origin_server_ts, } -- cgit 1.4.1 From 7a25b4302cc1896d191af5aa62efcb0c6b2c49eb Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Thu, 11 Aug 2022 11:40:40 +0100 Subject: 1.65.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/13501.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/13501.misc diff --git a/CHANGES.md b/CHANGES.md index 823f4f40b3..0bc8f95855 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.65.0rc2 (2022-08-11) +============================== + +Internal Changes +---------------- + +- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501)) + + Synapse 1.65.0rc1 (2022-08-09) ============================== diff --git a/changelog.d/13501.misc b/changelog.d/13501.misc deleted file mode 100644 index 002b91960c..0000000000 --- a/changelog.d/13501.misc +++ /dev/null @@ -1 +0,0 @@ -Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. (#13365)' to give more time for clients to update. diff --git a/debian/changelog b/debian/changelog index 6e4bdc3a00..21115d7be2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium + + * New Synapse release 1.65.0rc2. + + -- Synapse Packaging team Thu, 11 Aug 2022 11:38:18 +0100 + matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium * New Synapse release 1.65.0rc1. diff --git a/pyproject.toml b/pyproject.toml index ad2224f564..a9f59a676f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.65.0rc1" +version = "1.65.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 507c1cb3309e989d84ec3ff9557a96ae1fc7f369 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 11 Aug 2022 11:42:24 +0100 Subject: Update the rejected state of events during resync (#13459) Events can be un-rejected or newly-rejected during resync, so ensure we update the database and caches when that happens. 
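In rough terms (an illustrative sketch, not the patch itself; apart from `mark_event_rejected_txn`, which this patch adds, the names below are hypothetical): after recomputing the state at a previously partial-state event, compare the event's old and new rejection status, and if it flipped, persist the new status and drop any cached copy of the event.

```python
# Illustrative sketch only. mark_event_rejected_txn is the helper added by
# this patch; the surrounding function and parameter names are assumptions.
from typing import Optional

def update_rejection_after_resync(
    txn,    # database transaction (a LoggingTransaction in Synapse)
    store,  # event store exposing the new mark_event_rejected_txn helper
    event_id: str,
    was_rejected: bool,                   # rejection status before the resync
    new_rejection_reason: Optional[str],  # reason after recomputing state, if any
) -> None:
    now_rejected = new_rejection_reason is not None
    if was_rejected != now_rejected:
        # The event was un-rejected or newly rejected: update the rejections
        # table and events.rejection_reason, and invalidate the event cache,
        # since cached event objects carry the (now stale) rejection flag.
        store.mark_event_rejected_txn(txn, event_id, new_rejection_reason)
```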
--- changelog.d/13459.misc | 1 + synapse/storage/databases/main/events_worker.py | 60 +++++++++++++++++++++++++ synapse/storage/databases/main/state.py | 5 +++ synapse/storage/state.py | 9 ---- 4 files changed, 66 insertions(+), 9 deletions(-) create mode 100644 changelog.d/13459.misc diff --git a/changelog.d/13459.misc b/changelog.d/13459.misc new file mode 100644 index 0000000000..e6082210a0 --- /dev/null +++ b/changelog.d/13459.misc @@ -0,0 +1 @@ +Faster joins: update the rejected state of events during de-partial-stating. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index e9ff6cfb34..b07d812ae2 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -2200,3 +2200,63 @@ class EventsWorkerStore(SQLBaseStore): (room_id,), ) return [row[0] for row in txn] + + def mark_event_rejected_txn( + self, + txn: LoggingTransaction, + event_id: str, + rejection_reason: Optional[str], + ) -> None: + """Mark an event that was previously accepted as rejected, or vice versa + + This can happen, for example, when resyncing state during a faster join. + + Args: + txn: + event_id: ID of event to update + rejection_reason: reason it has been rejected, or None if it is now accepted + """ + if rejection_reason is None: + logger.info( + "Marking previously-processed event %s as accepted", + event_id, + ) + self.db_pool.simple_delete_txn( + txn, + "rejections", + keyvalues={"event_id": event_id}, + ) + else: + logger.info( + "Marking previously-processed event %s as rejected(%s)", + event_id, + rejection_reason, + ) + self.db_pool.simple_upsert_txn( + txn, + table="rejections", + keyvalues={"event_id": event_id}, + values={ + "reason": rejection_reason, + "last_check": self._clock.time_msec(), + }, + ) + self.db_pool.simple_update_txn( + txn, + table="events", + keyvalues={"event_id": event_id}, + updatevalues={"rejection_reason": rejection_reason}, + ) + + self.invalidate_get_event_cache_after_txn(txn, event_id) + + # TODO(faster_joins): invalidate the cache on workers. Ideally we'd just + # call '_send_invalidation_to_replication', but we actually need the other + # end to call _invalidate_local_get_event_cache() rather than (just) + # _get_event_cache.invalidate(). + # + # One solution might be to (somehow) get the workers to call + # _invalidate_caches_for_event() (though that will invalidate more than + # strictly necessary). + # + # https://github.com/matrix-org/synapse/issues/12994 diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index f70705a0af..0b10af0e58 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -430,6 +430,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): updatevalues={"state_group": state_group}, ) + # the event may now be rejected where it was not before, or vice versa, + # in which case we need to update the rejected flags. 
+ if bool(context.rejected) != (event.rejected_reason is not None): + self.mark_event_rejected_txn(txn, event.event_id, context.rejected) + self.db_pool.simple_delete_one_txn( txn, table="partial_state_events", diff --git a/synapse/storage/state.py b/synapse/storage/state.py index af3bab2c15..0004d955b4 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -539,15 +539,6 @@ class StateFilter: is_mine_id: a callable which confirms if a given state_key matches a mxid of a local user """ - - # TODO(faster_joins): it's not entirely clear that this is safe. In particular, - # there may be circumstances in which we return a piece of state that, once we - # resync the state, we discover is invalid. For example: if it turns out that - # the sender of a piece of state wasn't actually in the room, then clearly that - # state shouldn't have been returned. - # We should at least add some tests around this to see what happens. - # https://github.com/matrix-org/synapse/issues/13006 - # if we haven't requested membership events, then it depends on the value of # 'include_others' if EventTypes.Member not in self.types: -- cgit 1.4.1 From 4390121684cc1ea37b631716289896292b0ee1ec Mon Sep 17 00:00:00 2001 From: Germain Date: Thu, 11 Aug 2022 14:04:20 +0100 Subject: Add viewport directive to HTML templates to optimise for mobile (#13493) --- changelog.d/13493.misc | 1 + synapse/res/templates/account_previously_renewed.html | 13 ++++++++++++- synapse/res/templates/account_renewed.html | 13 ++++++++++++- synapse/res/templates/add_threepid.html | 11 ++++++++--- synapse/res/templates/add_threepid_failure.html | 15 ++++++++++----- synapse/res/templates/add_threepid_success.html | 14 ++++++++++---- synapse/res/templates/auth_success.html | 4 ++-- synapse/res/templates/invalid_token.html | 13 ++++++++++++- synapse/res/templates/notice_expiry.html | 2 ++ synapse/res/templates/notif_mail.html | 2 ++ synapse/res/templates/password_reset.html | 7 ++++++- synapse/res/templates/password_reset_confirmation.html | 8 ++++++-- synapse/res/templates/password_reset_failure.html | 8 ++++++-- synapse/res/templates/password_reset_success.html | 7 +++++-- synapse/res/templates/recaptcha.html | 4 ++-- synapse/res/templates/registration.html | 7 ++++++- synapse/res/templates/registration_failure.html | 7 +++++-- synapse/res/templates/registration_success.html | 8 ++++++-- synapse/res/templates/registration_token.html | 6 +++--- synapse/res/templates/sso_account_deactivated.html | 4 ++-- synapse/res/templates/sso_auth_account_details.html | 3 ++- synapse/res/templates/sso_auth_bad_user.html | 3 ++- synapse/res/templates/sso_auth_confirm.html | 3 ++- synapse/res/templates/sso_auth_success.html | 3 ++- synapse/res/templates/sso_error.html | 3 ++- synapse/res/templates/sso_login_idp_picker.html | 2 ++ synapse/res/templates/sso_new_user_consent.html | 3 ++- synapse/res/templates/sso_redirect_confirm.html | 3 ++- synapse/res/templates/terms.html | 4 ++-- synapse/static/client/login/index.html | 3 ++- synapse/static/client/register/index.html | 3 ++- 31 files changed, 140 insertions(+), 47 deletions(-) create mode 100644 changelog.d/13493.misc diff --git a/changelog.d/13493.misc b/changelog.d/13493.misc new file mode 100644 index 0000000000..d7d5c33a89 --- /dev/null +++ b/changelog.d/13493.misc @@ -0,0 +1 @@ +Modify HTML template content to better support mobile devices' screen sizes. 
\ No newline at end of file diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html index b751359bdf..bd4f7cea97 100644 --- a/synapse/res/templates/account_previously_renewed.html +++ b/synapse/res/templates/account_previously_renewed.html @@ -1 +1,12 @@ -Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. + + + + + + + Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. + + + Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. + + \ No newline at end of file diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html index e8c0f52f05..57b319f375 100644 --- a/synapse/res/templates/account_renewed.html +++ b/synapse/res/templates/account_renewed.html @@ -1 +1,12 @@ -Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. + + + + + + + Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. + + + Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. + + \ No newline at end of file diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html index cc4ab07e09..71f2215b7a 100644 --- a/synapse/res/templates/add_threepid.html +++ b/synapse/res/templates/add_threepid.html @@ -1,9 +1,14 @@ - + + + + + + + Request to add an email address to your Matrix account +

A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:

- {{ link }} -

If this was not you, you can safely ignore this email. Thank you.

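The common head block that this commit inserts at the top of each template looks roughly like the following (a hedged sketch: the viewport meta line is the directive named in the commit subject, while the other tags, their order, and the per-template titles are assumptions rather than quoted from the patch).

```html
<!-- Approximate reconstruction of the head block added to each template;
     only the viewport directive is confirmed by the commit subject. -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Request to add an email address to your Matrix account</title>
</head>
```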
diff --git a/synapse/res/templates/add_threepid_failure.html b/synapse/res/templates/add_threepid_failure.html index 441d11c846..bd627ee9ce 100644 --- a/synapse/res/templates/add_threepid_failure.html +++ b/synapse/res/templates/add_threepid_failure.html @@ -1,8 +1,13 @@ - - + + + + + + + Request failed + -

The request failed for the following reason: {{ failure_reason }}.

- -

No changes have been made to your account.

+

The request failed for the following reason: {{ failure_reason }}.

+

No changes have been made to your account.

diff --git a/synapse/res/templates/add_threepid_success.html b/synapse/res/templates/add_threepid_success.html index fbd6e4018f..49170c138e 100644 --- a/synapse/res/templates/add_threepid_success.html +++ b/synapse/res/templates/add_threepid_success.html @@ -1,6 +1,12 @@ - - + + + + + + + Your email has now been validated + -

Your email has now been validated, please return to your client. You may now close this window.

+

Your email has now been validated, please return to your client. You may now close this window.

- + \ No newline at end of file diff --git a/synapse/res/templates/auth_success.html b/synapse/res/templates/auth_success.html index baf4633142..2d6ac44a0e 100644 --- a/synapse/res/templates/auth_success.html +++ b/synapse/res/templates/auth_success.html @@ -1,8 +1,8 @@ Success! - + + diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html index 16730a527f..20e831ff4a 100644 --- a/synapse/res/templates/registration.html +++ b/synapse/res/templates/registration.html @@ -1,4 +1,9 @@ - + + + Registration + + +

You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:

diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html index 2833d79c37..a6ed22bc90 100644 --- a/synapse/res/templates/registration_failure.html +++ b/synapse/res/templates/registration_failure.html @@ -1,5 +1,8 @@ - - + + + + +

Validation failed for the following reason: {{ failure_reason }}.

diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html index fbd6e4018f..d51d5549d8 100644 --- a/synapse/res/templates/registration_success.html +++ b/synapse/res/templates/registration_success.html @@ -1,5 +1,9 @@ - - + + + Your email has now been validated + + +

Your email has now been validated, please return to your client. You may now close this window.

diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html index 4577ce1702..59a98f564c 100644 --- a/synapse/res/templates/registration_token.html +++ b/synapse/res/templates/registration_token.html @@ -1,8 +1,8 @@ - + Authentication - + + diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html index c3e4deed93..075f801cec 100644 --- a/synapse/res/templates/sso_account_deactivated.html +++ b/synapse/res/templates/sso_account_deactivated.html @@ -3,8 +3,8 @@ SSO account deactivated - - diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index cf72df0a2a..2d1db386e1 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -3,7 +3,8 @@ Create your account - + + diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html index 140653574d..27bbd76f51 100644 --- a/synapse/static/client/register/index.html +++ b/synapse/static/client/register/index.html @@ -2,7 +2,8 @@ Registration - + + -- cgit 1.4.1 From 953df2ad88fb9fc7d1a67966c79980136321119d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 11 Aug 2022 14:06:09 +0100 Subject: Add note to `redaction_retention_period` documentation mentioning that event purging runs at most every 5m (#13492) --- changelog.d/13492.doc | 1 + docs/usage/configuration/config_documentation.md | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/13492.doc diff --git a/changelog.d/13492.doc b/changelog.d/13492.doc new file mode 100644 index 0000000000..fc4850d556 --- /dev/null +++ b/changelog.d/13492.doc @@ -0,0 +1 @@ +Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 2af32a6155..bc3d2bec6a 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -759,6 +759,10 @@ allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] How long to keep redacted events in unredacted form in the database. After this period redacted events get replaced with their redacted form in the DB. +Synapse will check whether the retention period has concluded for redacted +events every 5 minutes. Thus, even if this option is set to `0`, Synapse may +still take up to 5 minutes to purge redacted events from the database. + Defaults to `7d`. Set to `null` to disable. Example configuration: -- cgit 1.4.1 From e825f7366b589f61977f63d4fdc598abe052e6a0 Mon Sep 17 00:00:00 2001 From: James Barton Date: Thu, 11 Aug 2022 15:10:10 -0500 Subject: Add `openssl` example for registration HMAC (#13472) Signed-off-by: James Barton --- changelog.d/13472.doc | 1 + docs/admin_api/register_api.md | 21 +++++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13472.doc diff --git a/changelog.d/13472.doc b/changelog.d/13472.doc new file mode 100644 index 0000000000..2ff6317300 --- /dev/null +++ b/changelog.d/13472.doc @@ -0,0 +1 @@ +Add `openssl` example for generating registration HMAC digest.
diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md index c346090bb1..d7b7cf6a76 100644 --- a/docs/admin_api/register_api.md +++ b/docs/admin_api/register_api.md @@ -46,7 +46,24 @@ As an example: The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being the shared secret and the content being the nonce, user, password, either the string "admin" or "notadmin", and optionally the user_type -each separated by NULs. For an example of generation in Python: +each separated by NULs. + +Here is an easy way to generate the HMAC digest if you have Bash and OpenSSL: + +```bash +# Update these values and then paste this code block into a bash terminal +nonce='thisisanonce' +username='pepper_roni' +password='pizza' +admin='admin' +secret='shared_secret' + +printf '%s\0%s\0%s\0%s' "$nonce" "$username" "$password" "$admin" | + openssl sha1 -hmac "$secret" | + awk '{print $2}' +``` + +For an example of generation in Python: ```python import hmac, hashlib @@ -70,4 +87,4 @@ def generate_mac(nonce, user, password, admin=False, user_type=None): mac.update(user_type.encode('utf8')) return mac.hexdigest() -``` \ No newline at end of file +``` -- cgit 1.4.1 From 2c5e2ae898ca9c4de6043b4a3e51e67eae8a87f9 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 12 Aug 2022 13:25:47 +0000 Subject: Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. (#13515) --- changelog.d/13515.doc | 1 + docker/README.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13515.doc diff --git a/changelog.d/13515.doc b/changelog.d/13515.doc new file mode 100644 index 0000000000..a4d9d97dcb --- /dev/null +++ b/changelog.d/13515.doc @@ -0,0 +1 @@ +Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index 5b7de2fe38..017f046c58 100644 --- a/docker/README.md +++ b/docker/README.md @@ -191,7 +191,7 @@ If you need to build the image from a Synapse checkout, use the following `docke build` command from the repo's root: ``` -docker build -t matrixdotorg/synapse -f docker/Dockerfile . +DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile . 
``` You can choose to build a different docker image by changing the value of the `-f` flag to -- cgit 1.4.1 From 434fd82d5f299cda8b95274930c266645620ea9d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 13 Aug 2022 21:50:20 +0100 Subject: Update grafana dashboard --- contrib/grafana/synapse.json | 4616 +++++++++++++++++++++++++----------------- 1 file changed, 2753 insertions(+), 1863 deletions(-) diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 819426b8ea..0f23fc17ea 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -9,17 +9,18 @@ "pluginName": "Prometheus" } ], + "__elements": {}, "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "7.3.7" + "version": "9.0.4" }, { "type": "panel", "id": "graph", - "name": "Graph", + "name": "Graph (old)", "version": "" }, { @@ -33,13 +34,21 @@ "id": "prometheus", "name": "Prometheus", "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" } ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "enable": false, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -51,10 +60,9 @@ ] }, "editable": true, - "gnetId": null, + "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": null, - "iteration": 1628606819564, "links": [ { "asDropdown": false, @@ -66,24 +74,16 @@ ], "title": "Dashboards", "type": "dashboards" - }, - { - "asDropdown": false, - "icon": "external link", - "includeVars": false, - "keepTime": false, - "tags": [], - "targetBlank": true, - "title": "Synapse Documentation", - "tooltip": "Open Documentation", - "type": "link", - "url": "https://matrix-org.github.io/synapse/latest/" } ], + "liveNow": false, "panels": [ { "collapsed": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, @@ -92,6 +92,15 @@ }, "id": 73, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Overview", "type": "row" }, @@ -108,12 +117,8 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "gridPos": { "h": 9, @@ -132,6 +137,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le)", "format": "heatmap", "interval": "", @@ -149,31 +157,24 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { - "decimals": null, "format": "s", "logBase": 2, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -207,7 +208,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -266,6 +267,9 @@ "steppedLine": false, "targets": [ { + "datasource": 
{ + "uid": "$datasource" + }, "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -273,6 +277,9 @@ "refId": "D" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.9, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", "interval": "", @@ -281,6 +288,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.75, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -288,6 +298,9 @@ "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.5, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -295,21 +308,33 @@ "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.25, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "legendFormat": "25%", "refId": "F" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.05, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "legendFormat": "5%", "refId": "G" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))", "legendFormat": "Average", "refId": "H" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))", "hide": false, "instant": false, @@ -319,6 +344,7 @@ ], "thresholds": [ { + "$$hashKey": "object:283", "colorMode": "warning", "fill": false, "line": true, @@ -327,6 +353,7 @@ "yaxis": "left" }, { + "$$hashKey": "object:284", "colorMode": "critical", "fill": false, "line": true, @@ -335,9 +362,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Event Send Time Quantiles (excluding errors, all workers)", "tooltip": { "shared": true, @@ -346,34 +371,30 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, + "$$hashKey": "object:255", "format": "s", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true }, { + "$$hashKey": "object:256", "format": "hertz", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -381,10 +402,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": 
"$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -417,7 +439,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -427,6 +449,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -437,6 +462,7 @@ ], "thresholds": [ { + "$$hashKey": "object:566", "colorMode": "critical", "fill": true, "line": true, @@ -445,9 +471,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "CPU usage", "tooltip": { "shared": false, @@ -456,34 +480,28 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, + "$$hashKey": "object:538", "format": "percentunit", - "label": null, "logBase": 1, "max": "1.5", "min": "0", "show": true }, { + "$$hashKey": "object:539", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -491,12 +509,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -530,7 +549,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -540,6 +559,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", @@ -550,6 +572,9 @@ "target": "" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "hide": true, "interval": "", @@ -558,9 +583,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Memory", "tooltip": { "shared": false, @@ -570,31 +593,27 @@ "transformations": [], "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:1560", "format": "bytes", "logBase": 1, - "max": null, "min": "0", "show": true }, { + "$$hashKey": "object:1561", "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -602,10 +621,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -638,12 +658,13 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { + "$$hashKey": "object:639", "alias": "/max$/", "color": "#890F02", "fill": 0, @@ -655,6 +676,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": 
"time_series", "hide": false, @@ -665,6 +689,9 @@ "step": 20 }, { + "datasource": { + "uid": "$datasource" + }, "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": true, @@ -676,9 +703,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Open FDs", "tooltip": { "shared": false, @@ -687,40 +712,35 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, + "$$hashKey": "object:650", "format": "none", "label": "", "logBase": 1, - "max": null, - "min": null, "show": true }, { - "decimals": null, + "$$hashKey": "object:651", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -734,12 +754,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -751,7 +772,7 @@ "h": 7, "w": 12, "x": 0, - "y": 25 + "y": 27 }, "hiddenSeries": false, "id": 5, @@ -777,15 +798,17 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { + "$$hashKey": "object:1240", "alias": "/user/" }, { + "$$hashKey": "object:1241", "alias": "/system/" } ], @@ -794,6 +817,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, @@ -803,6 +829,9 @@ "step": 20 }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "hide": false, @@ -815,6 +844,7 @@ ], "thresholds": [ { + "$$hashKey": "object:1278", "colorMode": "custom", "fillColor": "rgba(255, 255, 255, 1)", "line": true, @@ -824,6 +854,7 @@ "yaxis": "left" }, { + "$$hashKey": "object:1279", "colorMode": "custom", "fillColor": "rgba(255, 255, 255, 1)", "line": true, @@ -833,6 +864,7 @@ "yaxis": "left" }, { + "$$hashKey": "object:1498", "colorMode": "critical", "fill": true, "line": true, @@ -841,9 +873,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "CPU", "tooltip": { "shared": false, @@ -852,15 +882,13 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, + "$$hashKey": "object:1250", "format": "percentunit", "label": "", "logBase": 1, @@ -869,71 +897,113 @@ "show": true }, { + "$$hashKey": "object:1251", "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "description": "Shows the time in which the given percentage of reactor ticks completed, over the 
sampled timespan", "fieldConfig": { "defaults": { - "custom": {}, - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 25 + "y": 27 }, - "hiddenSeries": false, "id": 105, "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, "links": [], - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.3.7", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "8.3.2", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "histogram_quantile(0.999, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "hide": false, + "interval": "", + "legendFormat": "{{job}}-{{index}} 99.9%", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -943,13 +1013,23 @@ "step": 20 }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", "format": "time_series", + "interval": "", "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} 95%", "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, @@ -957,6 +1037,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "rate(python_twisted_reactor_tick_time_sum{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]) / rate(python_twisted_reactor_tick_time_count{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, @@ -964,58 +1048,21 @@ "refId": "D" } ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, "title": "Reactor tick quantiles", - "tooltip": { - "shared": false, - "sort": 
0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } + "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1027,7 +1074,7 @@ "h": 7, "w": 12, "x": 0, - "y": 32 + "y": 34 }, "hiddenSeries": false, "id": 34, @@ -1049,7 +1096,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1059,6 +1106,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", @@ -1069,6 +1119,9 @@ "target": "" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "interval": "", "legendFormat": "total", @@ -1076,9 +1129,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Memory", "tooltip": { "shared": false, @@ -1088,9 +1139,7 @@ "transformations": [], "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -1098,21 +1147,17 @@ { "format": "bytes", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1120,10 +1165,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1134,7 +1180,7 @@ "h": 7, "w": 12, "x": 12, - "y": 32 + "y": 34 }, "hiddenSeries": false, "id": 49, @@ -1156,7 +1202,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1172,6 +1218,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", @@ -1182,9 +1231,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Prometheus scrape time", "tooltip": { "shared": false, @@ -1193,18 +1240,14 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, @@ -1219,8 +1262,7 @@ } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1228,10 +1270,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1242,7 +1285,7 @@ 
"h": 7, "w": 12, "x": 0, - "y": 39 + "y": 41 }, "hiddenSeries": false, "id": 53, @@ -1264,7 +1307,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1274,6 +1317,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, @@ -1282,9 +1328,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Up", "tooltip": { "shared": false, @@ -1293,33 +1337,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1327,10 +1362,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1341,7 +1377,7 @@ "h": 7, "w": 12, "x": 12, - "y": 39 + "y": 41 }, "hiddenSeries": false, "id": 120, @@ -1362,7 +1398,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1372,6 +1408,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "hide": false, @@ -1381,6 +1420,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "hide": false, @@ -1401,9 +1443,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Stacked CPU usage", "tooltip": { "shared": false, @@ -1412,33 +1452,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:572", "format": "percentunit", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:573", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1446,10 +1479,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1460,7 +1494,7 @@ "h": 7, "w": 12, "x": 0, - "y": 46 + "y": 48 }, "hiddenSeries": false, "id": 136, @@ -1481,7 +1515,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1491,20 +1525,24 @@ 
"steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_client_requests{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "legendFormat": "{{job}}-{{index}} {{method}}", "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_matrixfederationclient_requests{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "legendFormat": "{{job}}-{{index}} {{method}} (federation)", "refId": "B" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Outgoing HTTP request rate", "tooltip": { "shared": false, @@ -1513,43 +1551,133 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "reqps", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "active threads", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 48 + }, + "id": 207, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "synapse_threadpool_working_threads{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "{{job}}-{{index}} {{name}}", + "refId": "A" + } + ], + "title": "Threadpool activity", + "type": "timeseries" + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" } ], - "repeat": null, "title": "Process info", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -1571,18 +1699,14 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 21 + "y": 28 }, "heatmap": {}, "hideZeroBuckets": false, @@ -1595,6 +1719,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)", "format": "heatmap", "intervalFactor": 1, @@ -1611,33 +1738,26 @@ "xAxis": { "show": true }, - 
"xBucketNumber": null, - "xBucketSize": null, "yAxis": { - "decimals": null, "format": "s", "logBase": 2, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "", "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1649,7 +1769,7 @@ "h": 9, "w": 12, "x": 12, - "y": 21 + "y": 28 }, "hiddenSeries": false, "id": 33, @@ -1671,7 +1791,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1681,6 +1801,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)", "format": "time_series", "interval": "", @@ -1692,9 +1815,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Events Persisted (all workers)", "tooltip": { "shared": true, @@ -1703,31 +1824,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:102", "format": "hertz", "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:103", "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1735,21 +1851,17 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, + "decimals": 1, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 30 + "y": 37 }, "hiddenSeries": false, "id": 40, @@ -1770,7 +1882,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1780,6 +1892,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, @@ -1788,9 +1903,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Events/s Local vs Remote", "tooltip": { "shared": true, @@ -1799,9 +1912,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -1810,22 +1921,17 @@ "format": "hertz", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1833,21 +1939,17 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, + "decimals": 1, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 30 + "y": 37 }, "hiddenSeries": false, "id": 
46, @@ -1868,7 +1970,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1878,6 +1980,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "instant": false, @@ -1888,9 +1993,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Events/s by Type", "tooltip": { "shared": false, @@ -1899,33 +2002,25 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1935,21 +2030,17 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, + "decimals": 1, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 37 + "y": 44 }, "hiddenSeries": false, "id": 44, @@ -1973,7 +2064,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -1983,6 +2074,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, @@ -1992,9 +2086,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Events/s by Origin", "tooltip": { "shared": false, @@ -2003,33 +2095,25 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2037,21 +2121,17 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, + "decimals": 1, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 37 + "y": 44 }, "hiddenSeries": false, "id": 45, @@ -2075,7 +2155,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2085,6 +2165,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)", "format": "time_series", "intervalFactor": 2, @@ -2094,9 +2177,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Memberships/s by Origin", "tooltip": { 
"shared": true, @@ -2105,33 +2186,25 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2139,10 +2212,12 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2153,7 +2228,7 @@ "h": 9, "w": 12, "x": 0, - "y": 44 + "y": 51 }, "hiddenSeries": false, "id": 118, @@ -2175,7 +2250,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2191,6 +2266,11 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", "format": "time_series", "interval": "", @@ -2199,6 +2279,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", "format": "time_series", "interval": "", @@ -2207,6 +2291,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", "format": "time_series", "intervalFactor": 1, @@ -2214,6 +2302,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", "format": "time_series", "intervalFactor": 1, @@ -2221,6 +2313,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)", "format": "time_series", "intervalFactor": 1, @@ -2229,9 +2325,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Event send time quantiles by worker", "tooltip": { "shared": true, @@ -2240,75 +2334,186 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:263", "format": "s", - "label": null, "logBase": 1, - 
"max": null, - "min": null, "show": true }, { + "$$hashKey": "object:264", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } - } - ], - "repeat": null, - "title": "Event persistence", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "id": 57, - "panels": [ + }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "description": "CPU and DB time spent on most expensive state resolution in a room, summed over all workers. This is a very rough proxy for \"how fast is state res\", but it doesn't accurately represent the system load (e.g. it completely ignores cheap state resolutions).\n", "fieldConfig": { "defaults": { - "custom": {}, - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s/s" }, "overrides": [] }, - "fill": 2, - "fillGradient": 0, - "grid": {}, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 51 + }, + "id": 222, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "exemplar": false, + "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds{instance=\"$instance\"}[1m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "DB time", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "exemplar": false, + "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds{instance=\"$instance\"}[1m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "CPU time", + "refId": "C" + } + ], + "title": "Stateres worst-case", + "type": "timeseries" + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], + "title": "Event persistence", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 57, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "uid": "$datasource" + }, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "grid": {}, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 31 + "y": 29 }, "hiddenSeries": false, "id": 4, @@ -2333,7 +2538,7 @@ 
"alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2343,6 +2548,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -2370,9 +2578,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Request Count by arrival time", "tooltip": { "shared": false, @@ -2381,9 +2587,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2391,21 +2595,16 @@ { "format": "hertz", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2413,12 +2612,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2430,7 +2630,7 @@ "h": 8, "w": 12, "x": 12, - "y": 31 + "y": 29 }, "hiddenSeries": false, "id": 32, @@ -2451,7 +2651,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2461,6 +2661,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})", "format": "time_series", "intervalFactor": 2, @@ -2471,9 +2674,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Top 10 Request Counts", "tooltip": { "shared": false, @@ -2482,9 +2683,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2492,21 +2691,16 @@ { "format": "hertz", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2514,13 +2708,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": null, + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2532,7 +2726,7 @@ "h": 8, "w": 12, "x": 0, - "y": 39 + "y": 37 }, "hiddenSeries": false, "id": 139, @@ -2557,7 +2751,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2567,6 +2761,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -2594,9 +2791,7 @@ "yaxis": "left" } ], - 
"timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Total CPU Usage by Endpoint", "tooltip": { "shared": false, @@ -2605,9 +2800,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2615,21 +2808,16 @@ { "format": "percentunit", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2637,13 +2825,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": null, + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2655,7 +2843,7 @@ "h": 8, "w": 12, "x": 12, - "y": 39 + "y": 37 }, "hiddenSeries": false, "id": 52, @@ -2680,7 +2868,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2690,6 +2878,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -2717,9 +2908,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Average CPU Usage by Endpoint", "tooltip": { "shared": false, @@ -2728,9 +2917,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2738,21 +2925,16 @@ { "format": "s", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2760,12 +2942,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2777,7 +2960,7 @@ "h": 8, "w": 12, "x": 0, - "y": 47 + "y": 45 }, "hiddenSeries": false, "id": 7, @@ -2801,7 +2984,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2811,6 +2994,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -2821,9 +3007,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "DB Usage by endpoint", "tooltip": { "shared": false, @@ -2832,9 +3016,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2842,21 +3024,16 @@ { "format": "percentunit", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - 
"align": false, - "alignLevel": null + "align": false } }, { @@ -2864,13 +3041,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": null, + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2882,7 +3059,7 @@ "h": 8, "w": 12, "x": 12, - "y": 47 + "y": 45 }, "hiddenSeries": false, "id": 47, @@ -2907,7 +3084,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -2917,6 +3094,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "(sum(rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))/(sum(rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))", "format": "time_series", "hide": false, @@ -2928,9 +3108,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Non-sync avg response time", "tooltip": { "shared": false, @@ -2939,9 +3117,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2949,21 +3125,16 @@ { "format": "s", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2971,10 +3142,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -2985,7 +3157,7 @@ "h": 9, "w": 12, "x": 0, - "y": 55 + "y": 53 }, "hiddenSeries": false, "id": 103, @@ -3006,7 +3178,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -3023,6 +3195,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "format": "time_series", "interval": "", @@ -3031,6 +3206,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(avg_over_time(synapse_http_server_in_flight_requests_count{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "interval": "", "legendFormat": "Total", @@ -3038,9 +3216,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Requests in flight", "tooltip": { "shared": false, @@ -3049,43 +3225,45 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], - "repeat": null, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], "title": "Requests", "type": "row" }, { "collapsed": true, - "datasource": 
"${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -3099,10 +3277,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3113,7 +3292,7 @@ "h": 9, "w": 12, "x": 0, - "y": 32 + "y": 5 }, "hiddenSeries": false, "id": 99, @@ -3130,9 +3309,12 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3142,6 +3324,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -3151,9 +3336,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "CPU usage by background jobs", "tooltip": { "shared": false, @@ -3162,33 +3345,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percentunit", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3196,10 +3370,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3210,7 +3385,7 @@ "h": 9, "w": 12, "x": 12, - "y": 32 + "y": 5 }, "hiddenSeries": false, "id": 101, @@ -3227,9 +3402,12 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3239,6 +3417,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "hide": false, @@ -3248,9 +3429,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "DB usage by background jobs (including scheduling time)", "tooltip": { "shared": false, @@ -3259,33 +3438,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percentunit", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3293,10 +3463,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - 
"custom": {}, "links": [] }, "overrides": [] @@ -3307,7 +3478,7 @@ "h": 8, "w": 12, "x": 0, - "y": 41 + "y": 14 }, "hiddenSeries": false, "id": 138, @@ -3323,8 +3494,11 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "8.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -3334,15 +3508,16 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_background_process_in_flight_count{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "legendFormat": "{{job}}-{{index}} {{name}}", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Background jobs in flight", "tooltip": { "shared": false, @@ -3351,42 +3526,45 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], "title": "Background jobs", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -3400,10 +3578,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3436,7 +3615,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3446,6 +3625,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, @@ -3453,15 +3635,16 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))", "legendFormat": "failed txn rate", "refId": "B" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Outgoing federation transaction rate", "tooltip": { "shared": true, @@ -3470,33 +3653,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3504,10 +3678,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3540,7 +3715,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, 
"points": false, "renderer": "flot", @@ -3550,6 +3725,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, @@ -3557,6 +3735,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, @@ -3565,9 +3746,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Incoming PDU/EDU rate", "tooltip": { "shared": true, @@ -3576,33 +3755,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3610,10 +3780,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3646,7 +3817,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3656,6 +3827,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -3664,6 +3838,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, @@ -3672,9 +3849,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Outgoing PDU/EDU rate", "tooltip": { "shared": true, @@ -3683,33 +3858,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3717,10 +3883,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3753,7 +3920,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3763,6 +3930,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_federation_client_sent_edus_by_type{instance=\"$instance\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -3772,9 +3942,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Outgoing EDUs by type", "tooltip": { "shared": true, @@ -3783,33 +3951,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { 
"format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3817,11 +3976,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "description": "The number of events in the in-memory queues ", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3852,7 +4013,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -3862,12 +4023,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", "legendFormat": "pending PDUs {{job}}-{{index}}", "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", "legendFormat": "pending EDUs {{job}}-{{index}}", @@ -3875,9 +4044,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "In-memory federation transmission queues", "tooltip": { "shared": true, @@ -3886,9 +4053,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3897,7 +4062,6 @@ "format": "short", "label": "events", "logBase": 1, - "max": null, "min": "0", "show": true }, @@ -3905,14 +4069,11 @@ "format": "short", "label": "", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -3920,11 +4081,12 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "Number of events queued up on the master process for processing by the federation sender", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -3957,7 +4119,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3967,6 +4129,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_send_queue_presence_changed_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", @@ -3975,6 +4140,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_send_queue_presence_map_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -3984,6 +4152,9 @@ "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_send_queue_presence_destinations_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -3993,6 +4164,9 @@ "refId": "E" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_send_queue_keyed_edu_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -4002,6 +4176,9 @@ "refId": "C" }, { + "datasource": { + 
"uid": "$datasource" + }, "expr": "synapse_federation_send_queue_edus_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -4011,6 +4188,9 @@ "refId": "D" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_send_queue_pos_time_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -4021,9 +4201,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Outgoing EDU queues on master", "tooltip": { "shared": true, @@ -4032,39 +4210,30 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "none", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": -1, - "cardRound": null + "cardPadding": -1 }, "color": { "cardColor": "#b4ff00", @@ -4075,12 +4244,8 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "gridPos": { "h": 9, @@ -4099,6 +4264,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_event_processing_lag_by_event_bucket{instance=\"$instance\",name=\"federation_sender\"}[$bucket_size])) by (le)", "format": "heatmap", "instant": false, @@ -4118,30 +4286,24 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "s", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -4175,7 +4337,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4226,6 +4388,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.99, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", "interval": "", @@ -4234,6 +4399,9 @@ "refId": "D" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.9, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", "interval": "", @@ -4242,6 +4410,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.75, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", "interval": "", @@ -4250,6 +4421,9 @@ "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.5, 
sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", "interval": "", @@ -4258,18 +4432,27 @@ "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.25, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "interval": "", "legendFormat": "25%", "refId": "F" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.05, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "interval": "", "legendFormat": "5%", "refId": "G" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_event_processing_lag_by_event_sum{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_event_processing_lag_by_event_count{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "interval": "", "legendFormat": "Average", @@ -4294,9 +4477,7 @@ "yaxis": "left" } ], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Federation send PDU lag quantiles", "tooltip": { "shared": true, @@ -4305,19 +4486,15 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "s", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true }, @@ -4325,20 +4502,17 @@ "format": "hertz", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": -1, - "cardRound": null + "cardPadding": -1 }, "color": { "cardColor": "#b4ff00", @@ -4349,12 +4523,8 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "gridPos": { "h": 9, @@ -4373,6 +4543,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_server_pdu_process_time_bucket{instance=\"$instance\"}[$bucket_size])) by (le)", "format": "heatmap", "instant": false, @@ -4392,32 +4565,26 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "s", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -4451,7 +4618,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4461,6 +4628,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "interval": "", @@ -4471,9 +4641,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": 
null, "title": "Age of oldest event in staging area", "tooltip": { "msResolution": false, @@ -4483,33 +4651,27 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:92", "format": "ms", - "label": null, "logBase": 1, - "max": null, "min": 0, "show": true }, { + "$$hashKey": "object:93", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -4517,12 +4679,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -4556,7 +4719,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4566,6 +4729,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "interval": "", @@ -4576,9 +4742,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Number of events in federation staging area", "tooltip": { "msResolution": false, @@ -4588,33 +4752,27 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:92", "format": "none", - "label": null, "logBase": 1, - "max": null, "min": 0, "show": true }, { + "$$hashKey": "object:93", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -4622,12 +4780,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "type": "prometheus", + "uid": "$datasource" }, "fill": 1, "fillGradient": 0, @@ -4655,7 +4810,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -4665,6 +4820,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "expr": "sum(rate(synapse_federation_soft_failed_events_total{instance=\"$instance\"}[$bucket_size]))", "interval": "", "legendFormat": "soft-failed events", @@ -4672,9 +4831,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Soft-failed event rate", "tooltip": { "shared": true, @@ -4683,42 +4840,47 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:131", "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:132", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], "title": "Federation", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + 
"type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -4732,10 +4894,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -4746,7 +4909,7 @@ "h": 8, "w": 12, "x": 0, - "y": 34 + "y": 32 }, "hiddenSeries": false, "id": 51, @@ -4763,9 +4926,12 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4775,6 +4941,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0", "format": "time_series", "interval": "", @@ -4784,6 +4953,9 @@ "step": 20 }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0", "format": "time_series", "intervalFactor": 2, @@ -4793,9 +4965,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "HTTP Push rate", "tooltip": { "shared": true, @@ -4804,33 +4974,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -4838,11 +4999,12 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -4853,7 +5015,7 @@ "h": 8, "w": 12, "x": 12, - "y": 34 + "y": 32 }, "hiddenSeries": false, "id": 134, @@ -4870,8 +5032,11 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "8.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -4881,15 +5046,16 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})", "legendFormat": "{{kind}} {{app_id}}", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Active pusher instances by app", "tooltip": { "shared": false, @@ -4898,60 +5064,65 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], - "repeat": null, + "targets": [ + { + "datasource": { + 
"type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], "title": "Pushes", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 32 }, - "id": 58, + "id": 219, "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "How many entries in current state that we are iterating over while calculating push rules.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -4965,13 +5136,13 @@ "y": 33 }, "hiddenSeries": false, - "id": 48, + "id": 209, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": true, + "show": false, "total": false, "values": false }, @@ -4984,7 +5155,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4994,19 +5165,24 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", + "interval": "", "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", + "legendFormat": "{{index}}", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", "refId": "A", - "step": 20 + "step": 2 } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Avg time waiting for db conn", + "title": "Iterations over State", "tooltip": { "shared": true, "sort": 0, @@ -5014,34 +5190,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, - "format": "s", + "format": "hertz", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, - "show": false + "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5049,11 +5217,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Rate that the cached push rules for a room get invalidated due to underlying push rules being changed. 
", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5067,14 +5237,13 @@ "y": 33 }, "hiddenSeries": false, - "id": 104, + "id": 211, "legend": { - "alignAsTable": true, "avg": false, "current": false, "max": false, "min": false, - "show": true, + "show": false, "total": false, "values": false }, @@ -5087,7 +5256,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -5097,29 +5266,629 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "exemplar": true, + "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{job}} {{index}} 99%", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{index}}", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", "refId": "A", - "step": 20 - }, - { - "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}} {{index}} 95%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}} {{index}} 90%", - "refId": "C" - }, - { + "step": 2 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Push Rule Invalidations", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "How often the \"delta optimisation\" happens.\n\nThe delta optimisation is when we update the push rules for a room incrementally after a state change where we know the delta between the old state and the new state.\n\nThis can't happen if we don't the delta or we're calculating push rules from scratch.", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 40 + }, + "hiddenSeries": false, + "id": 213, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "8.4.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Number of calls", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + 
"type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Hit Rate", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "refId": "A", + "step": 2 + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Number of calls", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Delta Optimisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": "", + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "hertz", + "label": "", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "How often we have the correct cached push rules for a room.", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 40 + }, + "hiddenSeries": false, + "id": 215, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "8.4.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Number of calls", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Hit Rate", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "refId": "A", + "step": 2 + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Number of calls", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + 
"timeRegions": [], + "title": "How often we reuse existing calculated push rules", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": "", + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "hertz", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "How often we have existing cached push rules for the room. \n\nNote that these might be outdated and need to be recalculated if the state has changed.", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 47 + }, + "hiddenSeries": false, + "id": 217, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "8.4.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Number of calls", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Hit Rate", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "refId": "A", + "step": 2 + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Number of calls", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "How often we have the RulesForRoom cached", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": "", + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "hertz", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], + "title": "Push Rule Cache", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 58, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "uid": "$datasource" + }, + "fieldConfig": { + 
"defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.0.4", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$datasource" + }, + "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg time waiting for db conn", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "uid": "$datasource" + }, + "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 104, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "9.0.4", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "uid": "$datasource" + }, + "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 99%", + "refId": "A", + "step": 20 + }, + { + "datasource": { + "uid": "$datasource" + }, + "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 95%", + "refId": "B" + }, + { + "datasource": { + "uid": "$datasource" + }, + "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 90%", + "refId": "C" + }, + { + "datasource": { + "uid": "$datasource" + }, "expr": 
"rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -5129,9 +5898,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Db scheduling time quantiles", "tooltip": { "shared": false, @@ -5140,34 +5907,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "s", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5175,12 +5934,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5192,7 +5952,7 @@ "h": 7, "w": 12, "x": 0, - "y": 40 + "y": 16 }, "hiddenSeries": false, "id": 10, @@ -5216,7 +5976,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5226,6 +5986,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -5236,9 +5999,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Top DB transactions by txn rate", "tooltip": { "shared": false, @@ -5247,9 +6008,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5257,21 +6016,17 @@ { "format": "hertz", "logBase": 1, - "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5279,12 +6034,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5296,7 +6052,7 @@ "h": 7, "w": 12, "x": 12, - "y": 40 + "y": 16 }, "hiddenSeries": false, "id": 11, @@ -5320,7 +6076,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5330,6 +6086,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "instant": false, @@ -5341,9 +6100,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "DB transactions by total txn time", "tooltip": { "shared": false, @@ -5352,9 +6109,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5362,21 +6117,16 @@ { "format": "percentunit", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, 
- "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5384,12 +6134,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5401,7 +6152,7 @@ "h": 7, "w": 12, "x": 0, - "y": 47 + "y": 23 }, "hiddenSeries": false, "id": 180, @@ -5425,7 +6176,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5435,6 +6186,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "instant": false, @@ -5446,9 +6200,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Average DB txn time", "tooltip": { "shared": false, @@ -5457,9 +6209,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5467,21 +6217,16 @@ { "format": "s", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5489,10 +6234,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5503,7 +6249,7 @@ "h": 9, "w": 12, "x": 12, - "y": 47 + "y": 23 }, "hiddenSeries": false, "id": 200, @@ -5524,7 +6270,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5534,6 +6280,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.99, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -5541,6 +6290,9 @@ "refId": "D" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.9, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -5548,6 +6300,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.75, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -5555,6 +6310,9 @@ "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.5, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, @@ -5563,9 +6321,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Time waiting for DB connection quantiles", "tooltip": { "shared": true, @@ -5574,49 +6330,54 @@ }, "type": "graph", "xaxis": { - 
"buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, + "$$hashKey": "object:203", "format": "s", "label": "", "logBase": 1, - "max": null, "min": "0", "show": true }, { + "$$hashKey": "object:204", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], - "repeat": null, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Database", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 33 + "y": 34 }, "id": 59, "panels": [ @@ -5625,12 +6386,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5642,7 +6404,7 @@ "h": 13, "w": 12, "x": 0, - "y": 9 + "y": 10 }, "hiddenSeries": false, "id": 12, @@ -5660,9 +6422,12 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5672,6 +6437,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])", "format": "time_series", "interval": "", @@ -5682,9 +6450,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Total CPU Usage by Block", "tooltip": { "shared": true, @@ -5693,9 +6459,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5703,21 +6467,16 @@ { "format": "percentunit", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5725,12 +6484,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5742,7 +6502,7 @@ "h": 13, "w": 12, "x": 12, - "y": 9 + "y": 10 }, "hiddenSeries": false, "id": 26, @@ -5760,9 +6520,12 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5772,6 +6535,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])", "format": "time_series", "interval": "", @@ -5782,9 +6548,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": 
"Average CPU Time per Block", "tooltip": { "shared": true, @@ -5793,9 +6557,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5803,21 +6565,16 @@ { "format": "ms", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5825,12 +6582,14 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5842,7 +6601,7 @@ "h": 13, "w": 12, "x": 0, - "y": 22 + "y": 23 }, "hiddenSeries": false, "id": 13, @@ -5860,9 +6619,12 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5872,19 +6634,22 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, - "legendFormat": "{{job}} {{block_name}}", + "legendFormat": "{{job}}-{{index}} {{block_name}}", "refId": "A", "step": 20 } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Total DB Usage by Block", "tooltip": { "shared": true, @@ -5893,31 +6658,27 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:196", "format": "percentunit", "logBase": 1, - "max": null, "min": 0, "show": true }, { + "$$hashKey": "object:197", "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -5925,13 +6686,14 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "The time each database transaction takes to execute, on average, broken down by metrics block.", "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -5943,7 +6705,7 @@ "h": 13, "w": 12, "x": 12, - "y": 22 + "y": 23 }, "hiddenSeries": false, "id": 27, @@ -5961,9 +6723,12 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -5973,6 +6738,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -5983,9 +6751,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Average Database Transaction time, by Block", "tooltip": { "shared": true, @@ -5994,9 +6760,7 
@@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6004,21 +6768,16 @@ { "format": "ms", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6026,12 +6785,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6043,7 +6803,7 @@ "h": 13, "w": 12, "x": 0, - "y": 35 + "y": 36 }, "hiddenSeries": false, "id": 28, @@ -6060,9 +6820,12 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6072,6 +6835,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -6082,9 +6848,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Average Transactions per Block", "tooltip": { "shared": false, @@ -6093,9 +6857,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6103,21 +6865,16 @@ { "format": "none", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6125,12 +6882,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6142,7 +6900,7 @@ "h": 13, "w": 12, "x": 12, - "y": 35 + "y": 36 }, "hiddenSeries": false, "id": 25, @@ -6159,9 +6917,12 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6171,6 +6932,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])", "format": "time_series", "interval": "", @@ -6181,9 +6945,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Average Wallclock Time per Block", "tooltip": { "shared": false, @@ -6192,9 +6954,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6202,21 +6962,16 @@ { "format": "ms", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6224,12 +6979,8 @@ 
"bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "fill": 1, "fillGradient": 0, @@ -6237,7 +6988,7 @@ "h": 15, "w": 12, "x": 0, - "y": 48 + "y": 49 }, "hiddenSeries": false, "id": 154, @@ -6254,8 +7005,11 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "8.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -6265,6 +7019,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "interval": "", "legendFormat": "{{job}}-{{index}} {{block_name}}", @@ -6272,9 +7029,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Block count", "tooltip": { "shared": true, @@ -6283,48 +7038,50 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], - "repeat": null, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Per-block metrics", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 34 + "y": 35 }, "id": 61, "panels": [ @@ -6333,13 +7090,14 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6351,7 +7109,7 @@ "h": 10, "w": 12, "x": 0, - "y": 35 + "y": 36 }, "hiddenSeries": false, "id": 1, @@ -6375,7 +7133,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6385,6 +7143,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, @@ -6394,9 +7155,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Cache Hit Ratio", "tooltip": { "msResolution": true, @@ -6406,15 +7165,12 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "percentunit", "label": "", "logBase": 1, @@ -6425,14 +7181,11 @@ { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6440,12 +7193,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { 
"defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6457,7 +7211,7 @@ "h": 10, "w": 12, "x": 12, - "y": 35 + "y": 36 }, "hiddenSeries": false, "id": 8, @@ -6480,7 +7234,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6490,6 +7244,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -6501,9 +7258,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Cache Size", "tooltip": { "shared": false, @@ -6512,9 +7267,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6522,21 +7275,17 @@ { "format": "short", "logBase": 1, - "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6544,12 +7293,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6561,7 +7311,7 @@ "h": 10, "w": 12, "x": 0, - "y": 45 + "y": 46 }, "hiddenSeries": false, "id": 38, @@ -6584,7 +7334,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6594,6 +7344,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -6604,9 +7357,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Cache request rate", "tooltip": { "shared": false, @@ -6615,9 +7366,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6625,21 +7374,17 @@ { "format": "rps", "logBase": 1, - "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6647,10 +7392,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6661,7 +7407,7 @@ "h": 10, "w": 12, "x": 12, - "y": 45 + "y": 46 }, "hiddenSeries": false, "id": 39, @@ -6683,7 +7429,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6693,6 +7439,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -6703,9 +7452,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Top 10 cache misses", "tooltip": { 
"shared": false, @@ -6714,33 +7461,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "rps", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6748,10 +7486,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -6762,7 +7501,7 @@ "h": 9, "w": 12, "x": 0, - "y": 55 + "y": 56 }, "hiddenSeries": false, "id": 65, @@ -6784,7 +7523,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 5, "points": false, "renderer": "flot", @@ -6794,17 +7533,19 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", + "interval": "", "intervalFactor": 1, "legendFormat": "{{name}} ({{reason}}) {{job}}-{{index}}", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Cache eviction rate", "tooltip": { "shared": false, @@ -6813,49 +7554,51 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "hertz", "label": "entries / second", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], - "repeat": null, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Caches", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 35 + "y": 36 }, "id": 148, "panels": [ @@ -6864,7 +7607,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -6908,6 +7653,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", "legendFormat": "{{name}} {{job}}-{{index}}", @@ -6915,9 +7663,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Response cache size", "tooltip": { "shared": false, @@ -6926,33 +7672,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -6960,7 +7697,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -7004,12 +7743,18 
@@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", "interval": "", "legendFormat": "{{name}} {{job}}-{{index}}", "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "", "interval": "", "legendFormat": "", @@ -7017,9 +7762,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Response cache hit rate", "tooltip": { "shared": false, @@ -7028,17 +7771,13 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "percentunit", - "label": null, "logBase": 1, "max": "1", "min": "0", @@ -7046,30 +7785,38 @@ }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Response caches", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 36 + "y": 37 }, "id": 62, "panels": [ @@ -7078,7 +7825,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -7123,6 +7872,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])", "format": "time_series", "instant": false, @@ -7132,9 +7884,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Total GC time by bucket (10m smoothing)", "tooltip": { "shared": true, @@ -7143,34 +7893,25 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "percentunit", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -7178,7 +7919,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "decimals": 3, "editable": true, "error": false, @@ -7228,6 +7971,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])", "format": "time_series", "intervalFactor": 2, @@ -7238,9 +7984,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Average GC Time Per Collection", "tooltip": { "shared": false, @@ -7249,9 +7993,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7259,21 +8001,16 @@ { "format": "s", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true } ], 
"yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -7281,7 +8018,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.", "fieldConfig": { "defaults": { @@ -7334,6 +8073,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "intervalFactor": 1, @@ -7342,9 +8084,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Allocation counts", "tooltip": { "shared": false, @@ -7353,9 +8093,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7364,23 +8102,17 @@ "format": "short", "label": "Gen N-1 GCs since last Gen N GC", "logBase": 1, - "max": null, - "min": null, "show": true }, { - "decimals": null, "format": "short", "label": "Objects since last Gen 0 GC", "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -7388,7 +8120,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -7433,6 +8167,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, @@ -7441,9 +8178,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Object counts per collection", "tooltip": { "shared": true, @@ -7452,33 +8187,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -7486,7 +8212,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -7531,6 +8259,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, @@ -7539,9 +8270,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "GC frequency", "tooltip": { "shared": true, @@ -7550,51 +8279,43 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": 0, - "cardRound": null 
+ "cardPadding": 0 }, "color": { "cardColor": "#b4ff00", "colorScale": "sqrt", "colorScheme": "interpolateSpectral", "exponent": 0.5, - "max": null, "min": 0, "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -7618,6 +8339,10 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)", "format": "heatmap", "intervalFactor": 1, @@ -7634,34 +8359,37 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { - "decimals": null, "format": "s", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true + }, + "yBucketBound": "auto" + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "refId": "A" } ], - "repeat": null, "title": "GC", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 37 + "y": 38 }, "id": 63, "panels": [ @@ -7670,10 +8398,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -7684,10 +8413,10 @@ "h": 7, "w": 12, "x": 0, - "y": 13 + "y": 14 }, "hiddenSeries": false, - "id": 42, + "id": 43, "legend": { "avg": false, "current": false, @@ -7706,7 +8435,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -7716,7 +8445,10 @@ "steppedLine": false, "targets": [ { - "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", + "datasource": { + "uid": "$datasource" + }, + "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}} {{command}}", @@ -7725,10 +8457,8 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Rate of incoming commands", + "title": "Rate of outgoing commands", "tooltip": { "shared": false, "sort": 0, @@ -7736,241 +8466,326 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "description": "", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "hertz" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 13 - }, - "hiddenSeries": false, - "id": 144, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "y": 14 }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", + "id": 41, + "links": [], "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "7.3.7", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "8.4.3", "targets": [ { - "expr": "synapse_replication_tcp_command_queue{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "format": "time_series", "interval": "", - "legendFormat": "{{stream_name}} {{job}}-{{index}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Queued incoming RDATA commands, by stream", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true + "intervalFactor": 2, + "legendFormat": "{{stream_name}}", + "refId": "A", + "step": 20 } ], - "yaxis": { - "align": false, - "alignLevel": null - } + "title": "Rate of outgoing RDATA commands, by stream", + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": 
"hertz" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 43, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "y": 21 }, - "lines": true, - "linewidth": 1, + "id": 42, "links": [], - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.3.7", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "8.4.3", "targets": [ { - "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", "format": "time_series", + "interval": "", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}} {{command}}", "refId": "A", "step": 20 } ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rate of outgoing commands", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" + "title": "Rate of incoming commands (including echoes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 1, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "hertz" + }, + "overrides": [] }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 220, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.4.3", + "targets": [ { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(synapse_replication_tcp_protocol_inbound_rdata_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 
{{stream_name}}", + "refId": "A", + "step": 20 } ], - "yaxis": { - "align": false, - "alignLevel": null - } + "title": "Rate of incoming RDATA commands (excluding echoes), by stream", + "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -7980,11 +8795,11 @@ "gridPos": { "h": 7, "w": 12, - "x": 12, - "y": 20 + "x": 0, + "y": 28 }, "hiddenSeries": false, - "id": 41, + "id": 144, "legend": { "avg": false, "current": false, @@ -7996,15 +8811,13 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", - "pointradius": 5, + "pluginVersion": "8.4.3", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -8013,20 +8826,19 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "synapse_replication_tcp_command_queue{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", - "intervalFactor": 2, - "legendFormat": "{{stream_name}}", - "refId": "A", - "step": 20 + "legendFormat": "{{stream_name}} {{job}}-{{index}}", + "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Outgoing stream updates", + "title": "Queued incoming RDATA commands, by stream", "tooltip": { "shared": false, "sort": 0, @@ -8034,33 +8846,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "hertz", - "label": null, + "$$hashKey": "object:218", + "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:219", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -8068,10 +8873,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -8081,11 +8887,11 @@ "gridPos": { "h": 7, "w": 12, - "x": 0, - "y": 27 + "x": 12, + "y": 28 }, "hiddenSeries": false, - "id": 113, + "id": 115, "legend": { "avg": false, "current": false, @@ -8104,7 +8910,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -8114,25 +8920,19 @@ "steppedLine": false, "targets": [ { - "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{stream_name}}", - "refId": "A" - }, - { - "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "datasource": { + "uid": "$datasource" + }, + "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", 
"intervalFactor": 1, - "legendFormat": "{{job}}-{{index}}", - "refId": "B" + "legendFormat": "{{job}}-{{index}} {{reason_type}}", + "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Replication connections", + "title": "Replication connection close reasons", "tooltip": { "shared": true, "sort": 0, @@ -8140,33 +8940,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "short", - "label": null, + "format": "hertz", "logBase": 1, - "max": null, - "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -8174,10 +8965,11 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -8187,11 +8979,11 @@ "gridPos": { "h": 7, "w": 12, - "x": 12, - "y": 27 + "x": 0, + "y": 35 }, "hiddenSeries": false, - "id": 115, + "id": 113, "legend": { "avg": false, "current": false, @@ -8210,7 +9002,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.4.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -8220,18 +9012,29 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "datasource": { + "uid": "$datasource" + }, + "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{reason_type}}", + "legendFormat": "{{job}}-{{index}} {{stream_name}}", "refId": "A" + }, + { + "datasource": { + "uid": "$datasource" + }, + "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}}", + "refId": "B" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Replication connection close reasons", + "title": "Replication connections", "tooltip": { "shared": true, "sort": 0, @@ -8239,48 +9042,51 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "hertz", - "label": null, + "format": "short", "logBase": 1, - "max": null, - "min": null, + "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], - "repeat": null, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Replication", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 38 + "y": 39 }, "id": 69, "panels": [ @@ -8289,7 +9095,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -8335,6 +9143,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, 
"expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - on() group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", @@ -8344,9 +9155,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Event processing lag", "tooltip": { "shared": true, @@ -8355,9 +9164,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8366,22 +9173,17 @@ "format": "short", "label": "events", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -8389,7 +9191,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -8435,6 +9239,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, @@ -8445,9 +9252,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Age of last processed event", "tooltip": { "shared": true, @@ -8456,33 +9261,25 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "ms", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -8490,7 +9287,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -8537,6 +9336,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "deriv(synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/1000 - 1", "format": "time_series", "hide": false, @@ -8547,9 +9349,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Event processing catchup rate", "tooltip": { "shared": true, @@ -8558,67 +9358,70 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "none", "label": "fallbehind(-) / catchup(+): s/sec", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Event processing loop positions", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 39 + "y": 40 }, "id": 126, "panels": [ { "cards": { - "cardPadding": 0, - "cardRound": null + "cardPadding": 0 }, "color": { "cardColor": "#B877D9", "colorScale": "sqrt", "colorScheme": "interpolateInferno", "exponent": 0.5, - "max": null, 
"min": 0, "mode": "opacity" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { "defaults": { @@ -8643,6 +9446,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", "format": "heatmap", "intervalFactor": 1, @@ -8650,8 +9456,6 @@ "refId": "A" } ], - "timeFrom": null, - "timeShift": null, "title": "Number of rooms, by number of forward extremities in room", "tooltip": { "show": true, @@ -8661,27 +9465,22 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "short", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { "defaults": { @@ -8725,6 +9524,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0", "format": "heatmap", "interval": "", @@ -8734,9 +9536,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Room counts, by number of extremities", "tooltip": { "shared": true, @@ -8745,40 +9545,30 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "decimals": null, "format": "none", "label": "Number of rooms", "logBase": 10, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": 0, - "cardRound": null + "cardPadding": 0 }, "color": { "cardColor": "#5794F2", @@ -8789,7 +9579,9 @@ "mode": "opacity" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", "fieldConfig": { "defaults": { @@ -8814,6 +9606,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", "format": "heatmap", "intervalFactor": 1, @@ -8821,8 +9616,6 @@ "refId": "A" } ], - "timeFrom": null, - "timeShift": null, "title": "Events persisted, by number of forward extremities in room (heatmap)", "tooltip": { "show": true, @@ -8832,27 +9625,22 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "short", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null 
+ "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", "fieldConfig": { "defaults": { @@ -8895,6 +9683,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -8902,6 +9693,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -8909,6 +9703,9 @@ "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -8916,6 +9713,9 @@ "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -8924,9 +9724,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Events persisted, by number of forward extremities in room (quantiles)", "tooltip": { "shared": true, @@ -8935,9 +9733,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8946,28 +9742,22 @@ "format": "short", "label": "Number of extremities in room", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": 0, - "cardRound": null + "cardPadding": 0 }, "color": { "cardColor": "#FF9830", @@ -8978,7 +9768,9 @@ "mode": "opacity" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { "defaults": { @@ -9003,6 +9795,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", "format": "heatmap", "intervalFactor": 1, @@ -9010,8 +9805,6 @@ "refId": "A" } ], - "timeFrom": null, - "timeShift": null, "title": "Events persisted, by number of stale forward extremities in room (heatmap)", "tooltip": { "show": true, @@ -9021,27 +9814,22 @@ "xAxis": { "show": true }, - 
"xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "short", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { "defaults": { @@ -9084,6 +9872,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -9091,6 +9882,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -9098,6 +9892,9 @@ "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -9105,6 +9902,9 @@ "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, @@ -9113,9 +9913,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Events persisted, by number of stale forward extremities in room (quantiles)", "tooltip": { "shared": true, @@ -9124,9 +9922,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9135,28 +9931,22 @@ "format": "short", "label": "Number of stale forward extremities in room", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": 0, - "cardRound": null + "cardPadding": 0 }, "color": { "cardColor": "#73BF69", @@ -9167,7 +9957,9 @@ "mode": "opacity" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", "fieldConfig": { "defaults": { @@ -9192,6 +9984,9 @@ "reverseYBuckets": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "heatmap", "interval": "", @@ 
-9200,8 +9995,6 @@ "refId": "A" } ], - "timeFrom": null, - "timeShift": null, "title": "Number of state resolution performed, by number of state groups involved (heatmap)", "tooltip": { "show": true, @@ -9211,27 +10004,22 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "short", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "yBucketBound": "auto" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", "fieldConfig": { "defaults": { @@ -9275,6 +10063,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -9283,6 +10074,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -9291,6 +10085,9 @@ "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -9299,6 +10096,9 @@ "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", @@ -9308,9 +10108,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Number of state resolutions performed, by number of state groups involved (quantiles)", "tooltip": { "shared": true, @@ -9319,9 +10117,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9330,22 +10126,17 @@ "format": "short", "label": "Number of state groups", "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -9353,7 +10144,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.", "fieldConfig": { "defaults": { @@ -9394,18 +10187,27 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", "legendFormat": "State res ", "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": 
"sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", "legendFormat": "Potential to prune", "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", "legendFormat": "Pruned", @@ -9413,9 +10215,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Stale extremity dropping", "tooltip": { "shared": true, @@ -9424,47 +10224,50 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Extremities", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 40 + "y": 41 }, "id": 158, "panels": [ @@ -9473,7 +10276,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -9525,6 +10330,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_admin_mau:current{instance=\"$instance\", job=~\"$job\"}", "format": "time_series", "interval": "", @@ -9533,6 +10341,9 @@ "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_admin_mau:max{instance=\"$instance\", job=~\"$job\"}", "format": "time_series", "interval": "", @@ -9542,9 +10353,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "MAU Limits", "tooltip": { "shared": true, @@ -9553,33 +10362,27 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:176", "format": "short", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { + "$$hashKey": "object:177", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -9587,7 +10390,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {} @@ -9630,6 +10435,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "synapse_admin_mau_current_mau_by_service{instance=\"$instance\"}", "interval": "", "legendFormat": "{{ app_service }}", @@ -9637,9 +10445,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "MAU by Appservice", "tooltip": { "shared": true, @@ -9648,47 +10454,50 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - 
"min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "MAU", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 41 + "y": 42 }, "id": 177, "panels": [ @@ -9697,7 +10506,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -9739,6 +10550,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_notifier_users_woken_by_stream{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "hide": false, @@ -9750,9 +10564,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Notifier Streams Woken", "tooltip": { "shared": true, @@ -9761,33 +10573,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -9795,7 +10598,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {}, @@ -9837,6 +10642,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_get_updates{job=~\"$job\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "interval": "", @@ -9847,9 +10655,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Presence Stream Fetch Type Rates", "tooltip": { "shared": true, @@ -9858,47 +10664,51 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Notifier", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 43 }, "id": 170, "panels": [ @@ -9907,12 +10717,8 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "fill": 1, "fillGradient": 0, @@ -9940,7 +10746,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.3.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -9950,6 +10756,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])", 
"interval": "", "legendFormat": "{{service}}", @@ -9957,9 +10766,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Sent Events rate", "tooltip": { "shared": true, @@ -9968,33 +10775,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:177", "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:178", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -10002,12 +10802,8 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "uid": "$datasource" }, "fill": 1, "fillGradient": 0, @@ -10035,7 +10831,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "8.3.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10045,16 +10841,17 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])", "interval": "", - "legendFormat": "{{service}}", + "legendFormat": "{{exported_service }} {{ service }}", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Transactions rate", "tooltip": { "shared": true, @@ -10063,47 +10860,52 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:260", "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:261", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Appservices", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 43 + "y": 44 }, "id": 188, "panels": [ @@ -10112,7 +10914,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {} @@ -10155,30 +10959,45 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Notified", "refId": "A" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Remote ping", "refId": "B" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Total updates", "refId": "C" }, { + "datasource": { + "uid": "$datasource" + }, "expr": 
"rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Remote updates", "refId": "D" }, { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Bump active time", @@ -10186,9 +11005,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Presence", "tooltip": { "shared": true, @@ -10197,33 +11014,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -10231,7 +11039,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {} @@ -10274,6 +11084,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_state_transition{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "{{from}} -> {{to}}", @@ -10281,9 +11094,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Presence state transitions", "tooltip": { "shared": true, @@ -10292,33 +11103,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -10326,7 +11128,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "fieldConfig": { "defaults": { "custom": {} @@ -10369,6 +11173,9 @@ "steppedLine": false, "targets": [ { + "datasource": { + "uid": "$datasource" + }, "expr": "rate(synapse_handler_presence_notify_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "{{reason}}", @@ -10376,9 +11183,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "Presence notify reason", "tooltip": { "shared": true, @@ -10387,165 +11192,162 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:165", "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:166", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, + "refId": "A" + } + ], "title": "Presence", "type": "row" }, { "collapsed": true, - "datasource": "${DS_PROMETHEUS}", + "datasource": { + "type": "prometheus", + "uid": "000000001" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 44 + "y": 45 }, "id": 197, 
"panels": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {} + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "hertz" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 1 + "y": 46 }, - "hiddenSeries": false, "id": 191, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } }, - "percentage": false, - "pluginVersion": "7.3.7", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "9.0.4", "targets": [ { - "expr": "rate(synapse_external_cache_set{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(synapse_external_cache_set{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])", "interval": "", - "legendFormat": "{{ cache_name }} {{ index }}", + "legendFormat": "{{ cache_name }} {{job}}-{{ index }}", + "range": true, "refId": "A" } ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, "title": "External Cache Set Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } + "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "type": "prometheus", + "uid": "$datasource" }, + "description": "", "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 1 + "y": 46 }, "hiddenSeries": false, "id": 193, @@ -10565,7 +11367,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.0.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -10575,16 +11377,19 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_external_cache_get{job=\"$job\", instance=\"$instance\", 
index=~\"$index\"}[$bucket_size])", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum without (hit) (rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size]))", "interval": "", - "legendFormat": "{{ cache_name }} {{ index }}", + "legendFormat": "{{ cache_name }} {{job}}-{{ index }}", + "range": true, "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "External Cache Get Rate", "tooltip": { "shared": true, @@ -10593,39 +11398,31 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { + "$$hashKey": "object:390", "format": "hertz", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:391", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { "cards": { - "cardPadding": -1, - "cardRound": null + "cardPadding": -1 }, "color": { "cardColor": "#b4ff00", @@ -10636,18 +11433,15 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] + "datasource": { + "type": "prometheus", + "uid": "$datasource" }, "gridPos": { - "h": 9, + "h": 8, "w": 12, "x": 0, - "y": 9 + "y": 54 }, "heatmap": {}, "hideZeroBuckets": false, @@ -10660,7 +11454,10 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le)", + "datasource": { + "uid": "$datasource" + }, + "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])) by (le)", "format": "heatmap", "instant": false, "interval": "", @@ -10679,20 +11476,109 @@ "xAxis": { "show": true }, - "xBucketNumber": null, - "xBucketSize": null, "yAxis": { "decimals": 0, "format": "s", "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null + "show": true + }, + "yBucketBound": "auto" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "hertz" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 54 + }, + "id": 223, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.0.4", + "targets": [ + { + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\", hit=\"False\"}[$bucket_size])", + 
"interval": "", + "legendFormat": "{{ cache_name }} {{job}}-{{ index }}", + "range": true, + "refId": "A" + } + ], + "title": "External Cache Miss Rate", + "type": "timeseries" + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "000000001" }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null + "refId": "A" } ], "title": "External Cache", @@ -10700,7 +11586,7 @@ } ], "refresh": false, - "schemaVersion": 26, + "schemaVersion": 36, "style": "dark", "tags": [ "matrix" @@ -10713,10 +11599,8 @@ "text": "default", "value": "default" }, - "error": null, "hide": 0, "includeAll": false, - "label": null, "multi": false, "name": "datasource", "options": [], @@ -10731,14 +11615,12 @@ "allFormat": "glob", "auto": true, "auto_count": 100, - "auto_min": "60s", + "auto_min": "30s", "current": { "selected": false, "text": "auto", "value": "$__auto_interval_bucket_size" }, - "datasource": null, - "error": null, "hide": 0, "includeAll": false, "label": "Bucket Size", @@ -10789,24 +11671,25 @@ "type": "interval" }, { - "allValue": null, "current": {}, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "definition": "", - "error": null, "hide": 0, "includeAll": false, - "label": null, "multi": false, "name": "instance", "options": [], - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)", + "query": { + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)", + "refId": "Prometheus-instance-Variable-Query" + }, "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -10815,9 +11698,10 @@ "allFormat": "regex wildcard", "allValue": "", "current": {}, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "definition": "", - "error": null, "hide": 0, "hideLabel": false, "includeAll": true, @@ -10826,14 +11710,16 @@ "multiFormat": "regex values", "name": "job", "options": [], - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)", + "query": { + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)", + "refId": "Prometheus-job-Variable-Query" + }, "refresh": 2, "refresh_on_load": false, "regex": "", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -10842,9 +11728,10 @@ "allFormat": "regex wildcard", "allValue": ".*", "current": {}, - "datasource": "$datasource", + "datasource": { + "uid": "$datasource" + }, "definition": "", - "error": null, "hide": 0, "hideLabel": false, "includeAll": true, @@ -10853,14 +11740,16 @@ "multiFormat": "regex values", "name": "index", "options": [], - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)", + "query": { + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)", + "refId": "Prometheus-index-Variable-Query" + }, "refresh": 2, "refresh_on_load": false, "regex": "", "skipUrlSync": false, "sort": 3, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -10868,8 +11757,8 @@ ] }, "time": { - "from": "now-3h", - "to": "now" + "from": "2022-07-22T04:08:13.716Z", + "to": "2022-07-22T18:44:27.863Z" }, "timepicker": { "now": true, @@ -10900,5 +11789,6 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 100 -} + "version": 124, + "weekStart": "" +} \ No newline at end of file -- cgit 1.4.1 From f383b9b3eceaa082d5ae690550fe41460b711779 
Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 15 Aug 2022 11:32:30 +0100 Subject: Update locked versions of mypy and mypy-zope (#13521) --- changelog.d/13521.misc | 1 + poetry.lock | 78 +++++++++++++++++++++--------------------- synapse/app/_base.py | 4 +-- synapse/logging/context.py | 20 +++++------ synapse/logging/opentracing.py | 4 +-- synapse/storage/database.py | 22 ++++-------- tests/utils.py | 4 +-- 7 files changed, 60 insertions(+), 73 deletions(-) create mode 100644 changelog.d/13521.misc diff --git a/changelog.d/13521.misc b/changelog.d/13521.misc new file mode 100644 index 0000000000..f8501435c0 --- /dev/null +++ b/changelog.d/13521.misc @@ -0,0 +1 @@ +Update locked versions of mypy and mypy-zope. diff --git a/poetry.lock b/poetry.lock index 1acdb5da56..c34e5e9195 100644 --- a/poetry.lock +++ b/poetry.lock @@ -177,7 +177,7 @@ optional = false python-versions = "*" [package.extras] -test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] [[package]] name = "constantly" @@ -435,8 +435,8 @@ optional = false python-versions = ">=3.6" [package.extras] -trio = ["async-generator", "trio"] -test = ["async-timeout", "trio", "testpath", "pytest-asyncio", "pytest-trio", "pytest"] +test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"] +trio = ["trio", "async-generator"] [[package]] name = "jinja2" @@ -535,8 +535,8 @@ attrs = "*" importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] -test = ["aiounittest", "twisted", "tox"] -dev = ["twine (==4.0.1)", "build (==0.8.0)", "isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "mypy (==0.910)", "aiounittest", "twisted", "tox"] +dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"] +test = ["tox", "twisted", "aiounittest"] [[package]] name = "matrix-synapse-ldap3" @@ -572,7 +572,7 @@ python-versions = "*" [[package]] name = "mypy" -version = "0.950" +version = "0.971" description = "Optional static typing for Python" category = "dev" optional = false @@ -585,9 +585,9 @@ typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} typing-extensions = ">=3.10" [package.extras] -dmypy = ["psutil (>=4.0)"] -python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] +python2 = ["typed-ast (>=1.4.0,<2)"] +dmypy = ["psutil (>=4.0)"] [[package]] name = "mypy-extensions" @@ -599,19 +599,19 @@ python-versions = "*" [[package]] name = "mypy-zope" -version = "0.3.7" +version = "0.3.9" description = "Plugin for mypy to support zope interfaces" category = "dev" optional = false python-versions = "*" [package.dependencies] -mypy = "0.950" +mypy = "0.971" "zope.interface" = "*" "zope.schema" = "*" [package.extras] -test = ["pytest (>=4.6)", "pytest-cov", "lxml"] +test = ["lxml", "pytest-cov", "pytest (>=4.6)"] [[package]] name = "netaddr" @@ -820,10 +820,10 @@ optional = false python-versions = ">=3.6" [package.extras] -tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] -docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"] -dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"] crypto = ["cryptography (>=3.3.1)"] +dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] +docs = ["sphinx", 
"sphinx-rtd-theme", "zope.interface"] +tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] [[package]] name = "pymacaroons" @@ -2099,37 +2099,37 @@ msgpack = [ {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"}, ] mypy = [ - {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"}, - {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"}, - {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"}, - {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"}, - {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"}, - {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"}, - {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"}, - {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"}, - {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"}, - {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"}, - {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"}, - {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"}, - {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"}, - {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"}, - {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"}, - {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"}, - {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"}, - {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"}, - {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"}, - {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"}, - {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"}, - {file = 
"mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"}, - {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"}, + {file = "mypy-0.971-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f2899a3cbd394da157194f913a931edfd4be5f274a88041c9dc2d9cdcb1c315c"}, + {file = "mypy-0.971-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98e02d56ebe93981c41211c05adb630d1d26c14195d04d95e49cd97dbc046dc5"}, + {file = "mypy-0.971-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:19830b7dba7d5356d3e26e2427a2ec91c994cd92d983142cbd025ebe81d69cf3"}, + {file = "mypy-0.971-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:02ef476f6dcb86e6f502ae39a16b93285fef97e7f1ff22932b657d1ef1f28655"}, + {file = "mypy-0.971-cp310-cp310-win_amd64.whl", hash = "sha256:25c5750ba5609a0c7550b73a33deb314ecfb559c350bb050b655505e8aed4103"}, + {file = "mypy-0.971-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d3348e7eb2eea2472db611486846742d5d52d1290576de99d59edeb7cd4a42ca"}, + {file = "mypy-0.971-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3fa7a477b9900be9b7dd4bab30a12759e5abe9586574ceb944bc29cddf8f0417"}, + {file = "mypy-0.971-cp36-cp36m-win_amd64.whl", hash = "sha256:2ad53cf9c3adc43cf3bea0a7d01a2f2e86db9fe7596dfecb4496a5dda63cbb09"}, + {file = "mypy-0.971-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:855048b6feb6dfe09d3353466004490b1872887150c5bb5caad7838b57328cc8"}, + {file = "mypy-0.971-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:23488a14a83bca6e54402c2e6435467a4138785df93ec85aeff64c6170077fb0"}, + {file = "mypy-0.971-cp37-cp37m-win_amd64.whl", hash = "sha256:4b21e5b1a70dfb972490035128f305c39bc4bc253f34e96a4adf9127cf943eb2"}, + {file = "mypy-0.971-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9796a2ba7b4b538649caa5cecd398d873f4022ed2333ffde58eaf604c4d2cb27"}, + {file = "mypy-0.971-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a361d92635ad4ada1b1b2d3630fc2f53f2127d51cf2def9db83cba32e47c856"}, + {file = "mypy-0.971-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b793b899f7cf563b1e7044a5c97361196b938e92f0a4343a5d27966a53d2ec71"}, + {file = "mypy-0.971-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d1ea5d12c8e2d266b5fb8c7a5d2e9c0219fedfeb493b7ed60cd350322384ac27"}, + {file = "mypy-0.971-cp38-cp38-win_amd64.whl", hash = "sha256:23c7ff43fff4b0df93a186581885c8512bc50fc4d4910e0f838e35d6bb6b5e58"}, + {file = "mypy-0.971-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1f7656b69974a6933e987ee8ffb951d836272d6c0f81d727f1d0e2696074d9e6"}, + {file = "mypy-0.971-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2022bfadb7a5c2ef410d6a7c9763188afdb7f3533f22a0a32be10d571ee4bbe"}, + {file = "mypy-0.971-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef943c72a786b0f8d90fd76e9b39ce81fb7171172daf84bf43eaf937e9f220a9"}, + {file = "mypy-0.971-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d744f72eb39f69312bc6c2abf8ff6656973120e2eb3f3ec4f758ed47e414a4bf"}, + {file = "mypy-0.971-cp39-cp39-win_amd64.whl", hash = "sha256:77a514ea15d3007d33a9e2157b0ba9c267496acf12a7f2b9b9f8446337aac5b0"}, + {file = "mypy-0.971-py3-none-any.whl", hash = 
"sha256:0d054ef16b071149917085f51f89555a576e2618d5d9dd70bd6eea6410af3ac9"}, + {file = "mypy-0.971.tar.gz", hash = "sha256:40b0f21484238269ae6a57200c807d80debc6459d444c0489a102d7c6a75fa56"}, ] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] mypy-zope = [ - {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"}, - {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"}, + {file = "mypy-zope-0.3.9.tar.gz", hash = "sha256:afba6f694be193c12be466daa0e9bddbcfc93e332552e85724c030b34971025e"}, + {file = "mypy_zope-0.3.9-py3-none-any.whl", hash = "sha256:f476f83af95f7355c87ac5efc84b03caffdfd34e0c302f82bd232ac9c4ce501e"}, ] netaddr = [ {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 923891ae0d..55d135fa03 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -98,9 +98,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) func: Function to be called when sent a SIGHUP signal. *args, **kwargs: args and kwargs to be passed to the target function. """ - # This type-ignore should be redundant once we use a mypy release with - # https://github.com/python/mypy/pull/12668. - _sighup_callbacks.append((func, args, kwargs)) # type: ignore[arg-type] + _sighup_callbacks.append((func, args, kwargs)) def start_worker_reactor( diff --git a/synapse/logging/context.py b/synapse/logging/context.py index fd9cb97920..6a08ffed64 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -586,7 +586,7 @@ class LoggingContextFilter(logging.Filter): True to include the record in the log output. """ context = current_context() - record.request = self._default_request # type: ignore + record.request = self._default_request # context should never be None, but if it somehow ends up being, then # we end up in a death spiral of infinite loops, so let's check, for @@ -594,21 +594,21 @@ class LoggingContextFilter(logging.Filter): if context is not None: # Logging is interested in the request ID. Note that for backwards # compatibility this is stored as the "request" on the record. - record.request = str(context) # type: ignore + record.request = str(context) # Add some data from the HTTP request. 
request = context.request if request is None: return True - record.ip_address = request.ip_address # type: ignore - record.site_tag = request.site_tag # type: ignore - record.requester = request.requester # type: ignore - record.authenticated_entity = request.authenticated_entity # type: ignore - record.method = request.method # type: ignore - record.url = request.url # type: ignore - record.protocol = request.protocol # type: ignore - record.user_agent = request.user_agent # type: ignore + record.ip_address = request.ip_address + record.site_tag = request.site_tag + record.requester = request.requester + record.authenticated_entity = request.authenticated_entity + record.method = request.method + record.url = request.url + record.protocol = request.protocol + record.user_agent = request.user_agent return True diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index d1fa2cf8ae..c6f3ab28e6 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -966,9 +966,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: # FIXME: We could update this to handle any type of function by ignoring the # first argument only if it's named `self` or `cls`. This isn't fool-proof # but handles the idiomatic cases. - for i, arg in enumerate(args[1:], start=1): # type: ignore[index] + for i, arg in enumerate(args[1:], start=1): set_tag("ARG_" + argspec.args[i], str(arg)) - set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] + set_tag("args", str(args[len(argspec.args) :])) set_tag("kwargs", str(kwargs)) yield diff --git a/synapse/storage/database.py b/synapse/storage/database.py index b394a6658b..5da3dc079a 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -288,8 +288,7 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.after_callbacks is not None - # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 - self.after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + self.after_callbacks.append((callback, args, kwargs)) def async_call_after( self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs @@ -310,8 +309,7 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.async_after_callbacks is not None - # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 - self.async_after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + self.async_after_callbacks.append((callback, args, kwargs)) def call_on_exception( self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs @@ -329,8 +327,7 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.exception_callbacks is not None - # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 - self.exception_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + self.exception_callbacks.append((callback, args, kwargs)) def fetchone(self) -> Optional[Tuple]: return self.txn.fetchone() @@ -411,10 +408,7 @@ class LoggingTransaction: sql = self.database_engine.convert_param_style(sql) if args: try: - # The type-ignore should be redundant once mypy releases a version with - # https://github.com/python/mypy/pull/12668. (`args` might be empty, - # (but we'll catch the index error if so.) 
- sql_logger.debug("[SQL values] {%s} %r", self.name, args[0]) # type: ignore[index] + sql_logger.debug("[SQL values] {%s} %r", self.name, args[0]) except Exception: # Don't let logging failures stop SQL from working pass @@ -646,9 +640,7 @@ class DatabasePool: # For now, we just log an error, and hope that it works on the first attempt. # TODO: raise an exception. - # Type-ignore Mypy doesn't yet consider ParamSpec.args to be iterable; see - # https://github.com/python/mypy/pull/12668 - for i, arg in enumerate(args): # type: ignore[arg-type, var-annotated] + for i, arg in enumerate(args): if inspect.isgenerator(arg): logger.error( "Programming error: generator passed to new_transaction as " @@ -656,9 +648,7 @@ class DatabasePool: i, func, ) - # Type-ignore Mypy doesn't yet consider ParamSpec.args to be a mapping; see - # https://github.com/python/mypy/pull/12668 - for name, val in kwargs.items(): # type: ignore[attr-defined] + for name, val in kwargs.items(): if inspect.isgenerator(val): logger.error( "Programming error: generator passed to new_transaction as " diff --git a/tests/utils.py b/tests/utils.py index d2c6d1e852..ef5bd0f347 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -271,9 +271,7 @@ class MockClock: *args: P.args, **kwargs: P.kwargs, ) -> None: - # This type-ignore should be redundant once we use a mypy release with - # https://github.com/python/mypy/pull/12668. - self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs)) # type: ignore[arg-type] + self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs)) def cancel_call_later(self, timer: Timer, ignore_errs: bool = False) -> None: if timer.expired: -- cgit 1.4.1 From 46bd7f4ed9020bbed459c03a11c26d7f7c3093b0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 15 Aug 2022 09:33:17 -0400 Subject: Clarifications for event push action processing. (#13485) * Clarifies comments. * Fixes an erroneous comment (about return type) added in #13455 (ec24813220f9d54108924dc04aecd24555277b99). * Clarifies the name of a variable. * Simplifies logic of pulling out the latest join for the requesting user. --- changelog.d/13485.misc | 1 + .../storage/databases/main/event_push_actions.py | 53 ++++++++++++++-------- synapse/storage/databases/main/receipts.py | 2 +- 3 files changed, 35 insertions(+), 21 deletions(-) create mode 100644 changelog.d/13485.misc diff --git a/changelog.d/13485.misc b/changelog.d/13485.misc new file mode 100644 index 0000000000..c75712b9ff --- /dev/null +++ b/changelog.d/13485.misc @@ -0,0 +1 @@ +Add comments about how event push actions are rotated. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 161aad0f89..f62aa45ca1 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -227,7 +227,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas user_id: str, ) -> NotifCounts: """Get the notification count, the highlight count and the unread message count - for a given user in a given room after the given read receipt. + for a given user in a given room after their latest read receipt. Note that this function assumes the user to be a current member of the room, since it's either called by the sync handler to handle joined room entries, or by @@ -238,9 +238,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas user_id: The user to retrieve the counts for. 
Returns - A dict containing the counts mentioned earlier in this docstring, - respectively under the keys "notify_count", "highlight_count" and - "unread_count". + A NotifCounts object containing the notification count, the highlight count + and the unread message count. """ return await self.db_pool.runInteraction( "get_unread_event_push_actions_by_room", @@ -255,6 +254,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas room_id: str, user_id: str, ) -> NotifCounts: + # Get the stream ordering of the user's latest receipt in the room. result = self.get_last_receipt_for_user_txn( txn, user_id, @@ -266,13 +266,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ), ) - stream_ordering = None if result: _, stream_ordering = result - if stream_ordering is None: - # Either last_read_event_id is None, or it's an event we don't have (e.g. - # because it's been purged), in which case retrieve the stream ordering for + else: + # If the user has no receipts in the room, retrieve the stream ordering for # the latest membership event from this user in this room (which we assume is # a join). event_id = self.db_pool.simple_select_one_onecol_txn( @@ -289,10 +287,26 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ) def _get_unread_counts_by_pos_txn( - self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int + self, + txn: LoggingTransaction, + room_id: str, + user_id: str, + receipt_stream_ordering: int, ) -> NotifCounts: """Get the number of unread messages for a user/room that have happened since the given stream ordering. + + Args: + txn: The database transaction. + room_id: The room ID to get unread counts for. + user_id: The user ID to get unread counts for. + receipt_stream_ordering: The stream ordering of the user's latest + receipt in the room. If there are no receipts, the stream ordering + of the user's join event. + + Returns + A NotifCounts object containing the notification count, the highlight count + and the unread message count. """ counts = NotifCounts() @@ -320,7 +334,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas OR last_receipt_stream_ordering = ? ) """, - (room_id, user_id, stream_ordering, stream_ordering), + (room_id, user_id, receipt_stream_ordering, receipt_stream_ordering), ) row = txn.fetchone() @@ -338,17 +352,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND stream_ordering > ? AND highlight = 1 """ - txn.execute(sql, (user_id, room_id, stream_ordering)) + txn.execute(sql, (user_id, room_id, receipt_stream_ordering)) row = txn.fetchone() if row: counts.highlight_count += row[0] # Finally we need to count push actions that aren't included in the - # summary returned above, e.g. recent events that haven't been - # summarised yet, or the summary is empty due to a recent read receipt. - stream_ordering = max(stream_ordering, summary_stream_ordering) + # summary returned above. This might be due to recent events that haven't + # been summarised yet or the summary is out of date due to a recent read + # receipt. 
+ start_unread_stream_ordering = max( + receipt_stream_ordering, summary_stream_ordering + ) notify_count, unread_count = self._get_notif_unread_count_for_user_room( - txn, room_id, user_id, stream_ordering + txn, room_id, user_id, start_unread_stream_ordering ) counts.notify_count += notify_count @@ -1151,8 +1168,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas txn: The database transaction. old_rotate_stream_ordering: The previous maximum event stream ordering. rotate_to_stream_ordering: The new maximum event stream ordering to summarise. - - Returns whether the archiving process has caught up or not. """ # Calculate the new counts that should be upserted into event_push_summary @@ -1238,9 +1253,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (rotate_to_stream_ordering,), ) - async def _remove_old_push_actions_that_have_rotated( - self, - ) -> None: + async def _remove_old_push_actions_that_have_rotated(self) -> None: """Clear out old push actions that have been summarised.""" # We want to clear out anything that is older than a day that *has* already diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 0090c9f225..124c70ad37 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -161,7 +161,7 @@ class ReceiptsWorkerStore(SQLBaseStore): receipt_type: The receipt types to fetch. Returns: - The latest receipt, if one exists. + The event ID and stream ordering of the latest receipt, if one exists. """ clause, args = make_in_list_sql_clause( -- cgit 1.4.1 From 19e5d44886c9f43e96c1c29377aad3ce89fa8868 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 15 Aug 2022 14:51:05 +0100 Subject: Revert "Update locked versions of mypy and mypy-zope (#13521)" This reverts commit f383b9b3eceaa082d5ae690550fe41460b711779. Other PRs were seeing mypy failures that looked to be related to mypy-zope. Confusingly, we didn't see this on #13521. Revert this for now and investigate later. --- changelog.d/13521.misc | 1 - poetry.lock | 78 +++++++++++++++++++++--------------------- synapse/app/_base.py | 4 ++- synapse/logging/context.py | 20 +++++------ synapse/logging/opentracing.py | 4 +-- synapse/storage/database.py | 22 ++++++++---- tests/utils.py | 4 ++- 7 files changed, 73 insertions(+), 60 deletions(-) delete mode 100644 changelog.d/13521.misc diff --git a/changelog.d/13521.misc b/changelog.d/13521.misc deleted file mode 100644 index f8501435c0..0000000000 --- a/changelog.d/13521.misc +++ /dev/null @@ -1 +0,0 @@ -Update locked versions of mypy and mypy-zope. 
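For context, the `# type: ignore` comments reinstated by this revert all stem
from one mypy limitation. Before mypy 0.971, which shipped
https://github.com/python/mypy/pull/12668, mypy did not treat a ParamSpec's
`P.args`/`P.kwargs` as ordinary tuple/dict values, so storing them in a
container was rejected with an `[arg-type]` error. A minimal sketch of the
affected pattern, with illustrative names rather than code from this patch:

    from typing import Callable, Dict, List, Tuple

    from typing_extensions import ParamSpec

    P = ParamSpec("P")

    # A queue of (function, positional args, keyword args) to invoke later.
    _callbacks: List[
        Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
    ] = []

    def register(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
        # mypy < 0.971 rejects this append because it does not yet consider
        # P.args/P.kwargs to be plain tuple/dict values; hence the ignore
        # comments that this revert restores.
        _callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
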
diff --git a/poetry.lock b/poetry.lock index c34e5e9195..1acdb5da56 100644 --- a/poetry.lock +++ b/poetry.lock @@ -177,7 +177,7 @@ optional = false python-versions = "*" [package.extras] -test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] +test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"] [[package]] name = "constantly" @@ -435,8 +435,8 @@ optional = false python-versions = ">=3.6" [package.extras] -test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"] -trio = ["trio", "async-generator"] +trio = ["async-generator", "trio"] +test = ["async-timeout", "trio", "testpath", "pytest-asyncio", "pytest-trio", "pytest"] [[package]] name = "jinja2" @@ -535,8 +535,8 @@ attrs = "*" importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] -dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"] -test = ["tox", "twisted", "aiounittest"] +test = ["aiounittest", "twisted", "tox"] +dev = ["twine (==4.0.1)", "build (==0.8.0)", "isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "mypy (==0.910)", "aiounittest", "twisted", "tox"] [[package]] name = "matrix-synapse-ldap3" @@ -572,7 +572,7 @@ python-versions = "*" [[package]] name = "mypy" -version = "0.971" +version = "0.950" description = "Optional static typing for Python" category = "dev" optional = false @@ -585,9 +585,9 @@ typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} typing-extensions = ">=3.10" [package.extras] -reports = ["lxml"] -python2 = ["typed-ast (>=1.4.0,<2)"] dmypy = ["psutil (>=4.0)"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] [[package]] name = "mypy-extensions" @@ -599,19 +599,19 @@ python-versions = "*" [[package]] name = "mypy-zope" -version = "0.3.9" +version = "0.3.7" description = "Plugin for mypy to support zope interfaces" category = "dev" optional = false python-versions = "*" [package.dependencies] -mypy = "0.971" +mypy = "0.950" "zope.interface" = "*" "zope.schema" = "*" [package.extras] -test = ["lxml", "pytest-cov", "pytest (>=4.6)"] +test = ["pytest (>=4.6)", "pytest-cov", "lxml"] [[package]] name = "netaddr" @@ -820,10 +820,10 @@ optional = false python-versions = ">=3.6" [package.extras] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"] +dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"] crypto = ["cryptography (>=3.3.1)"] -dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] -docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] -tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] [[package]] name = "pymacaroons" @@ -2099,37 +2099,37 @@ msgpack = [ {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"}, ] mypy = [ - {file = "mypy-0.971-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f2899a3cbd394da157194f913a931edfd4be5f274a88041c9dc2d9cdcb1c315c"}, - {file = "mypy-0.971-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98e02d56ebe93981c41211c05adb630d1d26c14195d04d95e49cd97dbc046dc5"}, - {file = "mypy-0.971-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:19830b7dba7d5356d3e26e2427a2ec91c994cd92d983142cbd025ebe81d69cf3"}, - {file = 
"mypy-0.971-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:02ef476f6dcb86e6f502ae39a16b93285fef97e7f1ff22932b657d1ef1f28655"}, - {file = "mypy-0.971-cp310-cp310-win_amd64.whl", hash = "sha256:25c5750ba5609a0c7550b73a33deb314ecfb559c350bb050b655505e8aed4103"}, - {file = "mypy-0.971-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d3348e7eb2eea2472db611486846742d5d52d1290576de99d59edeb7cd4a42ca"}, - {file = "mypy-0.971-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3fa7a477b9900be9b7dd4bab30a12759e5abe9586574ceb944bc29cddf8f0417"}, - {file = "mypy-0.971-cp36-cp36m-win_amd64.whl", hash = "sha256:2ad53cf9c3adc43cf3bea0a7d01a2f2e86db9fe7596dfecb4496a5dda63cbb09"}, - {file = "mypy-0.971-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:855048b6feb6dfe09d3353466004490b1872887150c5bb5caad7838b57328cc8"}, - {file = "mypy-0.971-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:23488a14a83bca6e54402c2e6435467a4138785df93ec85aeff64c6170077fb0"}, - {file = "mypy-0.971-cp37-cp37m-win_amd64.whl", hash = "sha256:4b21e5b1a70dfb972490035128f305c39bc4bc253f34e96a4adf9127cf943eb2"}, - {file = "mypy-0.971-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9796a2ba7b4b538649caa5cecd398d873f4022ed2333ffde58eaf604c4d2cb27"}, - {file = "mypy-0.971-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a361d92635ad4ada1b1b2d3630fc2f53f2127d51cf2def9db83cba32e47c856"}, - {file = "mypy-0.971-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b793b899f7cf563b1e7044a5c97361196b938e92f0a4343a5d27966a53d2ec71"}, - {file = "mypy-0.971-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d1ea5d12c8e2d266b5fb8c7a5d2e9c0219fedfeb493b7ed60cd350322384ac27"}, - {file = "mypy-0.971-cp38-cp38-win_amd64.whl", hash = "sha256:23c7ff43fff4b0df93a186581885c8512bc50fc4d4910e0f838e35d6bb6b5e58"}, - {file = "mypy-0.971-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1f7656b69974a6933e987ee8ffb951d836272d6c0f81d727f1d0e2696074d9e6"}, - {file = "mypy-0.971-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2022bfadb7a5c2ef410d6a7c9763188afdb7f3533f22a0a32be10d571ee4bbe"}, - {file = "mypy-0.971-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef943c72a786b0f8d90fd76e9b39ce81fb7171172daf84bf43eaf937e9f220a9"}, - {file = "mypy-0.971-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d744f72eb39f69312bc6c2abf8ff6656973120e2eb3f3ec4f758ed47e414a4bf"}, - {file = "mypy-0.971-cp39-cp39-win_amd64.whl", hash = "sha256:77a514ea15d3007d33a9e2157b0ba9c267496acf12a7f2b9b9f8446337aac5b0"}, - {file = "mypy-0.971-py3-none-any.whl", hash = "sha256:0d054ef16b071149917085f51f89555a576e2618d5d9dd70bd6eea6410af3ac9"}, - {file = "mypy-0.971.tar.gz", hash = "sha256:40b0f21484238269ae6a57200c807d80debc6459d444c0489a102d7c6a75fa56"}, + {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"}, + {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"}, + {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"}, + {file = 
"mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"}, + {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"}, + {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"}, + {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"}, + {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"}, + {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"}, + {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"}, + {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"}, + {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"}, + {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"}, + {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"}, + {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"}, + {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"}, + {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"}, + {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"}, + {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"}, + {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"}, + {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"}, + {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"}, + {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"}, ] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] mypy-zope = [ - {file = "mypy-zope-0.3.9.tar.gz", hash = "sha256:afba6f694be193c12be466daa0e9bddbcfc93e332552e85724c030b34971025e"}, - {file = "mypy_zope-0.3.9-py3-none-any.whl", hash = 
"sha256:f476f83af95f7355c87ac5efc84b03caffdfd34e0c302f82bd232ac9c4ce501e"}, + {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"}, + {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"}, ] netaddr = [ {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 55d135fa03..923891ae0d 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -98,7 +98,9 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) func: Function to be called when sent a SIGHUP signal. *args, **kwargs: args and kwargs to be passed to the target function. """ - _sighup_callbacks.append((func, args, kwargs)) + # This type-ignore should be redundant once we use a mypy release with + # https://github.com/python/mypy/pull/12668. + _sighup_callbacks.append((func, args, kwargs)) # type: ignore[arg-type] def start_worker_reactor( diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 6a08ffed64..fd9cb97920 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -586,7 +586,7 @@ class LoggingContextFilter(logging.Filter): True to include the record in the log output. """ context = current_context() - record.request = self._default_request + record.request = self._default_request # type: ignore # context should never be None, but if it somehow ends up being, then # we end up in a death spiral of infinite loops, so let's check, for @@ -594,21 +594,21 @@ class LoggingContextFilter(logging.Filter): if context is not None: # Logging is interested in the request ID. Note that for backwards # compatibility this is stored as the "request" on the record. - record.request = str(context) + record.request = str(context) # type: ignore # Add some data from the HTTP request. request = context.request if request is None: return True - record.ip_address = request.ip_address - record.site_tag = request.site_tag - record.requester = request.requester - record.authenticated_entity = request.authenticated_entity - record.method = request.method - record.url = request.url - record.protocol = request.protocol - record.user_agent = request.user_agent + record.ip_address = request.ip_address # type: ignore + record.site_tag = request.site_tag # type: ignore + record.requester = request.requester # type: ignore + record.authenticated_entity = request.authenticated_entity # type: ignore + record.method = request.method # type: ignore + record.url = request.url # type: ignore + record.protocol = request.protocol # type: ignore + record.user_agent = request.user_agent # type: ignore return True diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index c6f3ab28e6..d1fa2cf8ae 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -966,9 +966,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: # FIXME: We could update this to handle any type of function by ignoring the # first argument only if it's named `self` or `cls`. This isn't fool-proof # but handles the idiomatic cases. 
- for i, arg in enumerate(args[1:], start=1): + for i, arg in enumerate(args[1:], start=1): # type: ignore[index] set_tag("ARG_" + argspec.args[i], str(arg)) - set_tag("args", str(args[len(argspec.args) :])) + set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] set_tag("kwargs", str(kwargs)) yield diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 5da3dc079a..b394a6658b 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -288,7 +288,8 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.after_callbacks is not None - self.after_callbacks.append((callback, args, kwargs)) + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] def async_call_after( self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs @@ -309,7 +310,8 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.async_after_callbacks is not None - self.async_after_callbacks.append((callback, args, kwargs)) + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.async_after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] def call_on_exception( self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs @@ -327,7 +329,8 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.exception_callbacks is not None - self.exception_callbacks.append((callback, args, kwargs)) + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.exception_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] def fetchone(self) -> Optional[Tuple]: return self.txn.fetchone() @@ -408,7 +411,10 @@ class LoggingTransaction: sql = self.database_engine.convert_param_style(sql) if args: try: - sql_logger.debug("[SQL values] {%s} %r", self.name, args[0]) + # The type-ignore should be redundant once mypy releases a version with + # https://github.com/python/mypy/pull/12668. (`args` might be empty, + # (but we'll catch the index error if so.) + sql_logger.debug("[SQL values] {%s} %r", self.name, args[0]) # type: ignore[index] except Exception: # Don't let logging failures stop SQL from working pass @@ -640,7 +646,9 @@ class DatabasePool: # For now, we just log an error, and hope that it works on the first attempt. # TODO: raise an exception. 
- for i, arg in enumerate(args): + # Type-ignore Mypy doesn't yet consider ParamSpec.args to be iterable; see + # https://github.com/python/mypy/pull/12668 + for i, arg in enumerate(args): # type: ignore[arg-type, var-annotated] if inspect.isgenerator(arg): logger.error( "Programming error: generator passed to new_transaction as " @@ -648,7 +656,9 @@ class DatabasePool: i, func, ) - for name, val in kwargs.items(): + # Type-ignore Mypy doesn't yet consider ParamSpec.args to be a mapping; see + # https://github.com/python/mypy/pull/12668 + for name, val in kwargs.items(): # type: ignore[attr-defined] if inspect.isgenerator(val): logger.error( "Programming error: generator passed to new_transaction as " diff --git a/tests/utils.py b/tests/utils.py index ef5bd0f347..d2c6d1e852 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -271,7 +271,9 @@ class MockClock: *args: P.args, **kwargs: P.kwargs, ) -> None: - self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs)) + # This type-ignore should be redundant once we use a mypy release with + # https://github.com/python/mypy/pull/12668. + self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs)) # type: ignore[arg-type] def cancel_call_later(self, timer: Timer, ignore_errs: bool = False) -> None: if timer.expired: -- cgit 1.4.1 From 344a2f767c636259412f7fc2914c1554a5c4dc1d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 Aug 2022 13:41:23 -0500 Subject: Instrument `FederationStateIdsServlet` - `/state_ids` (#13499) Instrument FederationStateIdsServlet - `/state_ids` so it's easier to follow what's going on in Jaeger when viewing a trace. --- changelog.d/13499.misc | 1 + synapse/federation/federation_server.py | 11 ++++++++++- synapse/handlers/federation.py | 4 +++- synapse/storage/databases/main/event_federation.py | 3 +++ synapse/util/ratelimitutils.py | 4 ++++ 5 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13499.misc diff --git a/changelog.d/13499.misc b/changelog.d/13499.misc new file mode 100644 index 0000000000..99dbcebec8 --- /dev/null +++ b/changelog.d/13499.misc @@ -0,0 +1 @@ +Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. 
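For context, the `@trace` and `@tag_args` decorators applied in the diff
below open a span named after the decorated function and attach the call's
arguments to it as tags, so each `/state_ids` request shows up in Jaeger
with its `room_id` and `event_id`. A rough sketch of the combined behaviour
in terms of the plain `opentracing` API, illustrative only and not Synapse's
actual implementation:

    import functools
    from typing import Any, Awaitable, Callable, TypeVar, cast

    import opentracing

    F = TypeVar("F", bound=Callable[..., Awaitable[Any]])

    def trace_and_tag_args(func: F) -> F:
        """Run `func` inside a span named after it, tagging its kwargs."""

        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            with opentracing.tracer.start_active_span(func.__name__) as scope:
                for name, value in kwargs.items():
                    # Mirror the "ARG_<name>" convention used by tag_args.
                    scope.span.set_tag("ARG_" + name, str(value))
                return await func(*args, **kwargs)

        return cast(F, wrapper)
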
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index db4b83a505..75fbc6073d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -61,7 +61,12 @@ from synapse.logging.context import ( nested_logging_context, run_in_background, ) -from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace +from synapse.logging.opentracing import ( + log_kv, + start_active_span_from_edu, + tag_args, + trace, +) from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, @@ -547,6 +552,8 @@ class FederationServer(FederationBase): return 200, resp + @trace + @tag_args async def on_state_ids_request( self, origin: str, room_id: str, event_id: str ) -> Tuple[int, JsonDict]: @@ -569,6 +576,8 @@ class FederationServer(FederationBase): return 200, resp + @trace + @tag_args async def _on_state_ids_request_compute( self, room_id: str, event_id: str ) -> JsonDict: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5042236742..6f5ab86ac4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -59,7 +59,7 @@ from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import tag_args, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( @@ -1081,6 +1081,8 @@ class FederationHandler: return event + @trace + @tag_args async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: """Returns the state at the event. i.e. 
not including said event.""" event = await self.store.get_event(event_id, check_room_id=room_id) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index eec55b6478..0bc8401f2b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -33,6 +33,7 @@ from synapse.api.constants import MAX_DEPTH, EventTypes from synapse.api.errors import StoreError from synapse.api.room_versions import EventFormatVersions, RoomVersion from synapse.events import EventBase, make_event_from_dict +from synapse.logging.opentracing import tag_args, trace from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -126,6 +127,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas ) return await self.get_events_as_list(event_ids) + @trace + @tag_args async def get_auth_chain_ids( self, room_id: str, diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 6394cc39ac..e1beaec5a3 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -27,6 +27,7 @@ from synapse.logging.context import ( make_deferred_yieldable, run_in_background, ) +from synapse.logging.opentracing import start_active_span from synapse.util import Clock if typing.TYPE_CHECKING: @@ -176,8 +177,11 @@ class _PerHostRatelimiter: # Ensure that we've properly cleaned up. self.sleeping_requests.discard(request_id) self.ready_request_queue.pop(request_id, None) + wait_span_scope.__exit__(None, None, None) return r + wait_span_scope = start_active_span("ratelimit wait") + wait_span_scope.__enter__() ret_defer.addCallbacks(on_start, on_err) ret_defer.addBoth(on_both) return make_deferred_yieldable(ret_defer) -- cgit 1.4.1 From 73c83c641138f0644582468a869e10f19f389800 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 15 Aug 2022 11:54:23 -0700 Subject: Add a warning to retention documentation regarding the possibility of database corruption (#13497) --- changelog.d/13497.doc | 2 ++ docs/message_retention_policies.md | 3 ++- docs/usage/configuration/config_documentation.md | 6 +++++- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13497.doc diff --git a/changelog.d/13497.doc b/changelog.d/13497.doc new file mode 100644 index 0000000000..ef6dc2308d --- /dev/null +++ b/changelog.d/13497.doc @@ -0,0 +1,2 @@ +Add a warning to retention documentation regarding the possibility of database corruption. + diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md index 8c88f93935..7f3e5359f1 100644 --- a/docs/message_retention_policies.md +++ b/docs/message_retention_policies.md @@ -8,7 +8,8 @@ and allow server and room admins to configure how long messages should be kept in a homeserver's database before being purged from it. **Please note that, as this feature isn't part of the Matrix specification yet, this implementation is to be considered as -experimental.** +experimental. There are known bugs which may cause database corruption. 
+Proceed with caution.** A message retention policy is mainly defined by its `max_lifetime` parameter, which defines how long a message can be kept around after diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index bc3d2bec6a..d8c29e6063 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -849,7 +849,11 @@ which are older than the room's maximum retention period. Synapse will also filter events received over federation so that events that should have been purged are ignored and not stored again. -The message retention policies feature is disabled by default. +The message retention policies feature is disabled by default. Please be advised +that enabling this feature carries some risk. There are known bugs with the implementation +which can cause database corruption. Setting retention to delete older history +is less risky than deleting newer history but in general caution is advised when enabling this +experimental feature. You can read more about this feature [here](../../message_retention_policies.md). This setting has the following sub-options: * `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the -- cgit 1.4.1 From d642ce4b3258012da6c024b0b5d1396d2a3e69dd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 15 Aug 2022 20:05:57 +0100 Subject: Use Pydantic to systematically validate a first batch of endpoints in `synapse.rest.client.account`. (#13188) --- changelog.d/13188.feature | 1 + mypy.ini | 2 +- poetry.lock | 54 +++++++++++++- pyproject.toml | 3 + synapse/http/servlet.py | 25 +++++++ synapse/rest/client/account.py | 148 ++++++++++++++++---------------------- synapse/rest/client/models.py | 69 ++++++++++++++++++ synapse/rest/models.py | 23 ++++++ tests/rest/client/test_account.py | 10 +-- tests/rest/client/test_models.py | 53 ++++++++++++++ 10 files changed, 296 insertions(+), 92 deletions(-) create mode 100644 changelog.d/13188.feature create mode 100644 synapse/rest/client/models.py create mode 100644 synapse/rest/models.py create mode 100644 tests/rest/client/test_models.py diff --git a/changelog.d/13188.feature b/changelog.d/13188.feature new file mode 100644 index 0000000000..4c39b74289 --- /dev/null +++ b/changelog.d/13188.feature @@ -0,0 +1 @@ +Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). 
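The validation pattern this commit introduces can be seen in the
`synapse/rest/client/account.py` hunks below: each servlet declares a
pydantic model with strict field types, and a new helper parses the request
body through that model, converting a `ValidationError` into a 400 response
with errcode `M_BAD_JSON`. A condensed, self-contained illustration,
mirroring the `DeactivateAccountRestServlet.PostBody` model added below:

    from typing import Optional

    from pydantic import BaseModel, StrictBool, StrictStr, ValidationError

    class DeactivateBody(BaseModel):
        id_server: Optional[StrictStr] = None
        erase: StrictBool = False

    # Strict types reject values that would merely coerce to the right type:
    body = DeactivateBody.parse_obj({"erase": True})
    assert body.erase is True and body.id_server is None

    try:
        DeactivateBody.parse_obj({"erase": "true"})  # a string, not a bool
    except ValidationError:
        pass  # the servlet helper turns this into a 400 / M_BAD_JSON
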
diff --git a/mypy.ini b/mypy.ini index 6add272990..e2034e411f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,6 +1,6 @@ [mypy] namespace_packages = True -plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py +plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py follow_imports = normal check_untyped_defs = True show_error_codes = True diff --git a/poetry.lock b/poetry.lock index 1acdb5da56..651659ec98 100644 --- a/poetry.lock +++ b/poetry.lock @@ -778,6 +778,21 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +[[package]] +name = "pydantic" +version = "1.9.1" +description = "Data validation and settings management using python type hints" +category = "main" +optional = false +python-versions = ">=3.6.1" + +[package.dependencies] +typing-extensions = ">=3.7.4.3" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + [[package]] name = "pyflakes" version = "2.4.0" @@ -1563,7 +1578,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "c24bbcee7e86dbbe7cdbf49f91a25b310bf21095452641e7440129f59b077f78" +content-hash = "7de518bf27967b3547eab8574342cfb67f87d6b47b4145c13de11112141dbf2d" [metadata.files] attrs = [ @@ -2260,6 +2275,43 @@ pycparser = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, ] +pydantic = [ + {file = "pydantic-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8098a724c2784bf03e8070993f6d46aa2eeca031f8d8a048dff277703e6e193"}, + {file = "pydantic-1.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c320c64dd876e45254bdd350f0179da737463eea41c43bacbee9d8c9d1021f11"}, + {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18f3e912f9ad1bdec27fb06b8198a2ccc32f201e24174cec1b3424dda605a310"}, + {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11951b404e08b01b151222a1cb1a9f0a860a8153ce8334149ab9199cd198131"}, + {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8bc541a405423ce0e51c19f637050acdbdf8feca34150e0d17f675e72d119580"}, + {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e565a785233c2d03724c4dc55464559639b1ba9ecf091288dd47ad9c629433bd"}, + {file = "pydantic-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4a88dcd6ff8fd47c18b3a3709a89adb39a6373f4482e04c1b765045c7e282fd"}, + {file = "pydantic-1.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:447d5521575f18e18240906beadc58551e97ec98142266e521c34968c76c8761"}, + {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985ceb5d0a86fcaa61e45781e567a59baa0da292d5ed2e490d612d0de5796918"}, + {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059b6c1795170809103a1538255883e1983e5b831faea6558ef873d4955b4a74"}, + {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d12f96b5b64bec3f43c8e82b4aab7599d0157f11c798c9f9c528a72b9e0b339a"}, + {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ae72f8098acb368d877b210ebe02ba12585e77bd0db78ac04a1ee9b9f5dd2166"}, + {file = "pydantic-1.9.1-cp36-cp36m-win_amd64.whl", hash = 
"sha256:79b485767c13788ee314669008d01f9ef3bc05db9ea3298f6a50d3ef596a154b"}, + {file = "pydantic-1.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:494f7c8537f0c02b740c229af4cb47c0d39840b829ecdcfc93d91dcbb0779892"}, + {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0f047e11febe5c3198ed346b507e1d010330d56ad615a7e0a89fae604065a0e"}, + {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:969dd06110cb780da01336b281f53e2e7eb3a482831df441fb65dd30403f4608"}, + {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:177071dfc0df6248fd22b43036f936cfe2508077a72af0933d0c1fa269b18537"}, + {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9bcf8b6e011be08fb729d110f3e22e654a50f8a826b0575c7196616780683380"}, + {file = "pydantic-1.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a955260d47f03df08acf45689bd163ed9df82c0e0124beb4251b1290fa7ae728"}, + {file = "pydantic-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ce157d979f742a915b75f792dbd6aa63b8eccaf46a1005ba03aa8a986bde34a"}, + {file = "pydantic-1.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0bf07cab5b279859c253d26a9194a8906e6f4a210063b84b433cf90a569de0c1"}, + {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d93d4e95eacd313d2c765ebe40d49ca9dd2ed90e5b37d0d421c597af830c195"}, + {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1542636a39c4892c4f4fa6270696902acb186a9aaeac6f6cf92ce6ae2e88564b"}, + {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a9af62e9b5b9bc67b2a195ebc2c2662fdf498a822d62f902bf27cccb52dbbf49"}, + {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fe4670cb32ea98ffbf5a1262f14c3e102cccd92b1869df3bb09538158ba90fe6"}, + {file = "pydantic-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9f659a5ee95c8baa2436d392267988fd0f43eb774e5eb8739252e5a7e9cf07e0"}, + {file = "pydantic-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b83ba3825bc91dfa989d4eed76865e71aea3a6ca1388b59fc801ee04c4d8d0d6"}, + {file = "pydantic-1.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1dd8fecbad028cd89d04a46688d2fcc14423e8a196d5b0a5c65105664901f810"}, + {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f"}, + {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb57ba90929bac0b6cc2af2373893d80ac559adda6933e562dcfb375029acee"}, + {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4ce9ae9e91f46c344bec3b03d6ee9612802682c1551aaf627ad24045ce090761"}, + {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:72ccb318bf0c9ab97fc04c10c37683d9eea952ed526707fabf9ac5ae59b701fd"}, + {file = "pydantic-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b6760b08b7c395975d893e0b814a11cf011ebb24f7d869e7118f5a339a82e1"}, + {file = "pydantic-1.9.1-py3-none-any.whl", hash = "sha256:4988c0f13c42bfa9ddd2fe2f569c9d54646ce84adc5de84228cfe83396f3bd58"}, + {file = "pydantic-1.9.1.tar.gz", hash = "sha256:1ed987c3ff29fff7fd8c3ea3a3ea877ad310aae2ef9889a119e22d3f2db0691a"}, +] pyflakes = [ {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = 
"sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"}, {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"}, diff --git a/pyproject.toml b/pyproject.toml index a9f59a676f..4f1e0b5c19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -158,6 +158,9 @@ packaging = ">=16.1" # At the time of writing, we only use functions from the version `importlib.metadata` # which shipped in Python 3.8. This corresponds to version 1.4 of the backport. importlib_metadata = { version = ">=1.4", python = "<3.8" } +# This is the most recent version of Pydantic with available on common distros. +pydantic = ">=1.7.4" + # Optional Dependencies diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 4ff840ca0e..26aaabfb34 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -23,9 +23,12 @@ from typing import ( Optional, Sequence, Tuple, + Type, + TypeVar, overload, ) +from pydantic import BaseModel, ValidationError from typing_extensions import Literal from twisted.web.server import Request @@ -694,6 +697,28 @@ def parse_json_object_from_request( return content +Model = TypeVar("Model", bound=BaseModel) + + +def parse_and_validate_json_object_from_request( + request: Request, model_type: Type[Model] +) -> Model: + """Parse a JSON object from the body of a twisted HTTP request, then deserialise and + validate using the given pydantic model. + + Raises: + SynapseError if the request body couldn't be decoded as JSON or + if it wasn't a JSON object. + """ + content = parse_json_object_from_request(request, allow_empty_body=False) + try: + instance = model_type.parse_obj(content) + except ValidationError as e: + raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=Codes.BAD_JSON) + + return instance + + def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None: absent = [] for k in required: diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 50edc6b7d3..e5ee63133b 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -15,10 +15,11 @@ # limitations under the License. 
import logging import random -from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple from urllib.parse import urlparse +from pydantic import StrictBool, StrictStr, constr + from twisted.web.server import Request from synapse.api.constants import LoginType @@ -34,12 +35,15 @@ from synapse.http.server import HttpServer, finish_request, respond_with_html from synapse.http.servlet import ( RestServlet, assert_params_in_dict, + parse_and_validate_json_object_from_request, parse_json_object_from_request, parse_string, ) from synapse.http.site import SynapseRequest from synapse.metrics import threepid_send_requests from synapse.push.mailer import Mailer +from synapse.rest.client.models import AuthenticationData, EmailRequestTokenBody +from synapse.rest.models import RequestBodyModel from synapse.types import JsonDict from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string @@ -82,32 +86,16 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): 400, "Email-based password resets have been disabled on this server" ) - body = parse_json_object_from_request(request) - - assert_params_in_dict(body, ["client_secret", "email", "send_attempt"]) - - # Extract params from body - client_secret = body["client_secret"] - assert_valid_client_secret(client_secret) - - # Canonicalise the email address. The addresses are all stored canonicalised - # in the database. This allows the user to reset his password without having to - # know the exact spelling (eg. upper and lower case) of address in the database. - # Stored in the database "foo@bar.com" - # User requests with "FOO@bar.com" would raise a Not Found error - try: - email = validate_email(body["email"]) - except ValueError as e: - raise SynapseError(400, str(e)) - send_attempt = body["send_attempt"] - next_link = body.get("next_link") # Optional param + body = parse_and_validate_json_object_from_request( + request, EmailRequestTokenBody + ) - if next_link: + if body.next_link: # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + assert_valid_next_link(self.hs, body.next_link) await self.identity_handler.ratelimit_request_token_requests( - request, "email", email + request, "email", body.email ) # The email will be sent to the stored address. @@ -115,7 +103,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # an email address which is controlled by the attacker but which, after # canonicalisation, matches the one in our database. 
existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( - "email", email + "email", body.email ) if existing_user_id is None: @@ -135,26 +123,26 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Have the configured identity server handle the request ret = await self.identity_handler.request_email_token( self.hs.config.registration.account_threepid_delegate_email, - email, - client_secret, - send_attempt, - next_link, + body.email, + body.client_secret, + body.send_attempt, + body.next_link, ) else: # Send password reset emails from Synapse sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, + body.email, + body.client_secret, + body.send_attempt, self.mailer.send_password_reset_mail, - next_link, + body.next_link, ) # Wrap the session id in a JSON object ret = {"sid": sid} threepid_send_requests.labels(type="email", reason="password_reset").observe( - send_attempt + body.send_attempt ) return 200, ret @@ -172,16 +160,23 @@ class PasswordRestServlet(RestServlet): self.password_policy_handler = hs.get_password_policy_handler() self._set_password_handler = hs.get_set_password_handler() + class PostBody(RequestBodyModel): + auth: Optional[AuthenticationData] = None + logout_devices: StrictBool = True + if TYPE_CHECKING: + # workaround for https://github.com/samuelcolvin/pydantic/issues/156 + new_password: Optional[StrictStr] = None + else: + new_password: Optional[constr(max_length=512, strict=True)] = None + @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - body = parse_json_object_from_request(request) + body = parse_and_validate_json_object_from_request(request, self.PostBody) # we do basic sanity checks here because the auth layer will store these # in sessions. Pull out the new password provided to us. - new_password = body.pop("new_password", None) + new_password = body.new_password if new_password is not None: - if not isinstance(new_password, str) or len(new_password) > 512: - raise SynapseError(400, "Invalid password") self.password_policy_handler.validate_password(new_password) # there are two possibilities here. 
Either the user does not have an @@ -201,7 +196,7 @@ class PasswordRestServlet(RestServlet): params, session_id = await self.auth_handler.validate_user_via_ui_auth( requester, request, - body, + body.dict(), "modify your account password", ) except InteractiveAuthIncompleteError as e: @@ -224,7 +219,7 @@ class PasswordRestServlet(RestServlet): result, params, session_id = await self.auth_handler.check_ui_auth( [[LoginType.EMAIL_IDENTITY]], request, - body, + body.dict(), "modify your account password", ) except InteractiveAuthIncompleteError as e: @@ -299,37 +294,33 @@ class DeactivateAccountRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self._deactivate_account_handler = hs.get_deactivate_account_handler() + class PostBody(RequestBodyModel): + auth: Optional[AuthenticationData] = None + id_server: Optional[StrictStr] = None + # Not specced, see https://github.com/matrix-org/matrix-spec/issues/297 + erase: StrictBool = False + @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - body = parse_json_object_from_request(request) - erase = body.get("erase", False) - if not isinstance(erase, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'erase' must be a boolean, if given", - Codes.BAD_JSON, - ) + body = parse_and_validate_json_object_from_request(request, self.PostBody) requester = await self.auth.get_user_by_req(request) # allow ASes to deactivate their own users if requester.app_service: await self._deactivate_account_handler.deactivate_account( - requester.user.to_string(), erase, requester + requester.user.to_string(), body.erase, requester ) return 200, {} await self.auth_handler.validate_user_via_ui_auth( requester, request, - body, + body.dict(), "deactivate your account", ) result = await self._deactivate_account_handler.deactivate_account( - requester.user.to_string(), - erase, - requester, - id_server=body.get("id_server"), + requester.user.to_string(), body.erase, requester, id_server=body.id_server ) if result: id_server_unbind_result = "success" @@ -364,28 +355,15 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): "Adding emails have been disabled due to lack of an email config" ) raise SynapseError( - 400, "Adding an email to your account is disabled on this server" + 400, + "Adding an email to your account is disabled on this server", ) - body = parse_json_object_from_request(request) - assert_params_in_dict(body, ["client_secret", "email", "send_attempt"]) - client_secret = body["client_secret"] - assert_valid_client_secret(client_secret) - - # Canonicalise the email address. The addresses are all stored canonicalised - # in the database. - # This ensures that the validation email is sent to the canonicalised address - # as it will later be entered into the database. - # Otherwise the email will be sent to "FOO@bar.com" and stored as - # "foo@bar.com" in database. 
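The `if TYPE_CHECKING:` split in `PasswordRestServlet.PostBody` above works around mypy refusing the value returned by `constr(...)` as a type annotation (pydantic issue 156): mypy type-checks against a plain `StrictStr`, while at runtime pydantic enforces the extra constraints. A minimal sketch of the pattern in isolation:

    from typing import TYPE_CHECKING, Optional

    from pydantic import BaseModel, StrictStr, constr

    class Example(BaseModel):
        if TYPE_CHECKING:
            # What mypy sees: an ordinary optional string field.
            new_password: Optional[StrictStr] = None
        else:
            # What actually runs: pydantic enforces strictness and a max length.
            new_password: Optional[constr(max_length=512, strict=True)] = None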
- try: - email = validate_email(body["email"]) - except ValueError as e: - raise SynapseError(400, str(e)) - send_attempt = body["send_attempt"] - next_link = body.get("next_link") # Optional param + body = parse_and_validate_json_object_from_request( + request, EmailRequestTokenBody + ) - if not await check_3pid_allowed(self.hs, "email", email): + if not await check_3pid_allowed(self.hs, "email", body.email): raise SynapseError( 403, "Your email domain is not authorized on this server", @@ -393,14 +371,14 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): ) await self.identity_handler.ratelimit_request_token_requests( - request, "email", email + request, "email", body.email ) - if next_link: + if body.next_link: # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + assert_valid_next_link(self.hs, body.next_link) - existing_user_id = await self.store.get_user_id_by_threepid("email", email) + existing_user_id = await self.store.get_user_id_by_threepid("email", body.email) if existing_user_id is not None: if self.config.server.request_token_inhibit_3pid_errors: @@ -419,26 +397,26 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): # Have the configured identity server handle the request ret = await self.identity_handler.request_email_token( self.hs.config.registration.account_threepid_delegate_email, - email, - client_secret, - send_attempt, - next_link, + body.email, + body.client_secret, + body.send_attempt, + body.next_link, ) else: # Send threepid validation emails from Synapse sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, + body.email, + body.client_secret, + body.send_attempt, self.mailer.send_add_threepid_mail, - next_link, + body.next_link, ) # Wrap the session id in a JSON object ret = {"sid": sid} threepid_send_requests.labels(type="email", reason="add_threepid").observe( - send_attempt + body.send_attempt ) return 200, ret diff --git a/synapse/rest/client/models.py b/synapse/rest/client/models.py new file mode 100644 index 0000000000..3150602997 --- /dev/null +++ b/synapse/rest/client/models.py @@ -0,0 +1,69 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING, Dict, Optional + +from pydantic import Extra, StrictInt, StrictStr, constr, validator + +from synapse.rest.models import RequestBodyModel +from synapse.util.threepids import validate_email + + +class AuthenticationData(RequestBodyModel): + """ + Data used during user-interactive authentication. + + (The name "Authentication Data" is taken directly from the spec.) + + Additional keys will be present, depending on the `type` field. Use `.dict()` to + access them. 
+ """ + + class Config: + extra = Extra.allow + + session: Optional[StrictStr] = None + type: Optional[StrictStr] = None + + +class EmailRequestTokenBody(RequestBodyModel): + if TYPE_CHECKING: + client_secret: StrictStr + else: + # See also assert_valid_client_secret() + client_secret: constr( + regex="[0-9a-zA-Z.=_-]", # noqa: F722 + min_length=0, + max_length=255, + strict=True, + ) + email: StrictStr + id_server: Optional[StrictStr] + id_access_token: Optional[StrictStr] + next_link: Optional[StrictStr] + send_attempt: StrictInt + + @validator("id_access_token", always=True) + def token_required_for_identity_server( + cls, token: Optional[str], values: Dict[str, object] + ) -> Optional[str]: + if values.get("id_server") is not None and token is None: + raise ValueError("id_access_token is required if an id_server is supplied.") + return token + + # Canonicalise the email address. The addresses are all stored canonicalised + # in the database. This allows the user to reset his password without having to + # know the exact spelling (eg. upper and lower case) of address in the database. + # Without this, an email stored in the database as "foo@bar.com" would cause + # user requests for "FOO@bar.com" to raise a Not Found error. + _email_validator = validator("email", allow_reuse=True)(validate_email) diff --git a/synapse/rest/models.py b/synapse/rest/models.py new file mode 100644 index 0000000000..ac39cda8e5 --- /dev/null +++ b/synapse/rest/models.py @@ -0,0 +1,23 @@ +from pydantic import BaseModel, Extra + + +class RequestBodyModel(BaseModel): + """A custom version of Pydantic's BaseModel which + + - ignores unknown fields and + - does not allow fields to be overwritten after construction, + + but otherwise uses Pydantic's default behaviour. + + Ignoring unknown fields is a useful default. It means that clients can provide + unstable field not known to the server without the request being refused outright. + + Subclassing in this way is recommended by + https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally + """ + + class Config: + # By default, ignore fields that we don't recognise. + extra = Extra.ignore + # By default, don't allow fields to be reassigned after parsing. 
+ allow_mutation = False diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 7ae926dc9c..c1a7fb2f8a 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -488,7 +488,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", "account/deactivate", request_data, access_token=tok ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, 200, channel.json_body) class WhoamiTestCase(unittest.HomeserverTestCase): @@ -641,21 +641,21 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): def test_add_email_no_at(self) -> None: self._request_token_invalid_email( "address-without-at.bar", - expected_errcode=Codes.UNKNOWN, + expected_errcode=Codes.BAD_JSON, expected_error="Unable to parse email address", ) def test_add_email_two_at(self) -> None: self._request_token_invalid_email( "foo@foo@test.bar", - expected_errcode=Codes.UNKNOWN, + expected_errcode=Codes.BAD_JSON, expected_error="Unable to parse email address", ) def test_add_email_bad_format(self) -> None: self._request_token_invalid_email( "user@bad.example.net@good.example.com", - expected_errcode=Codes.UNKNOWN, + expected_errcode=Codes.BAD_JSON, expected_error="Unable to parse email address", ) @@ -1001,7 +1001,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] ) self.assertEqual(expected_errcode, channel.json_body["errcode"]) - self.assertEqual(expected_error, channel.json_body["error"]) + self.assertIn(expected_error, channel.json_body["error"]) def _validate_token(self, link: str) -> None: # Remove the host diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py new file mode 100644 index 0000000000..a9da00665e --- /dev/null +++ b/tests/rest/client/test_models.py @@ -0,0 +1,53 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
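The net effect of `RequestBodyModel`'s `Config` above is that unknown keys are silently dropped and a parsed model is read-only. A rough sketch of both behaviours (illustrative, not part of the patch):

    from pydantic import BaseModel, Extra

    class Sketch(BaseModel):
        class Config:
            extra = Extra.ignore
            allow_mutation = False

        foo: str

    m = Sketch.parse_obj({"foo": "bar", "org.example.unstable_field": 1})
    print(m.foo)   # "bar"; the unrecognised key was ignored, not rejected
    m.foo = "baz"  # raises TypeError, because the model is immutable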
+import unittest + +from pydantic import ValidationError + +from synapse.rest.client.models import EmailRequestTokenBody + + +class EmailRequestTokenBodyTestCase(unittest.TestCase): + base_request = { + "client_secret": "hunter2", + "email": "alice@wonderland.com", + "send_attempt": 1, + } + + def test_token_required_if_id_server_provided(self) -> None: + with self.assertRaises(ValidationError): + EmailRequestTokenBody.parse_obj( + { + **self.base_request, + "id_server": "identity.wonderland.com", + } + ) + with self.assertRaises(ValidationError): + EmailRequestTokenBody.parse_obj( + { + **self.base_request, + "id_server": "identity.wonderland.com", + "id_access_token": None, + } + ) + + def test_token_typechecked_when_id_server_provided(self) -> None: + with self.assertRaises(ValidationError): + EmailRequestTokenBody.parse_obj( + { + **self.base_request, + "id_server": "identity.wonderland.com", + "id_access_token": 1337, + } + ) -- cgit 1.4.1 From 5442891cbca67d3af27c448791589e0b9abeb7f8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Aug 2022 12:22:17 +0100 Subject: Make push rules use proper structures. (#13522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This improves load times for push rules: | Version | Time per user | Time for 1k users | | -------------------- | ------------- | ----------------- | | Before | 138 µs | 138ms | | Now (with custom) | 2.11 µs | 2.11ms | | Now (without custom) | 49.7 ns | 0.05 ms | This therefore has a large impact on send times for rooms with large numbers of local users in the room. --- changelog.d/13522.misc | 1 + synapse/push/baserules.py | 518 +++++++++++++-------- synapse/push/bulk_push_rule_evaluator.py | 37 +- synapse/push/clientformat.py | 68 +-- synapse/push/push_rule_evaluator.py | 27 +- .../storage/databases/main/event_push_actions.py | 22 +- synapse/storage/databases/main/push_rule.py | 121 +++-- tests/handlers/test_deactivate_account.py | 33 +- 8 files changed, 494 insertions(+), 333 deletions(-) create mode 100644 changelog.d/13522.misc diff --git a/changelog.d/13522.misc b/changelog.d/13522.misc new file mode 100644 index 0000000000..0a8827205d --- /dev/null +++ b/changelog.d/13522.misc @@ -0,0 +1 @@ +Improve performance of sending messages in rooms with thousands of local users. diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 6c0cc5a6ce..c3e072033c 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -14,128 +14,224 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy -from typing import Any, Dict, List - -from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP +""" +Push rules is the system used to determine which events trigger a push (and a +bump in notification counts). + +This consists of a list of "push rules" for each user, where a push rule is a +pair of "conditions" and "actions". When a user receives an event Synapse +iterates over the list of push rules until it finds one where all the conditions +match the event, at which point "actions" describe the outcome (e.g. notify, +highlight, etc). + +Push rules are split up into 5 different "kinds" (aka "priority classes"), which +are run in order: + 1. Override — highest priority rules, e.g. always ignore notices + 2. Content — content specific rules, e.g. @ notifications + 3. Room — per room rules, e.g. enable/disable notifications for all messages + in a room + 4. 
Sender — per sender rules, e.g. never notify for messages from a given + user + 5. Underride — the lowest priority "default" rules, e.g. notify for every + message. + +The set of "base rules" are the list of rules that every user has by default. A +user can modify their copy of the push rules in one of three ways: + + 1. Adding a new push rule of a certain kind + 2. Changing the actions of a base rule + 3. Enabling/disabling a base rule. + +The base rules are split into whether they come before or after a particular +kind, so the order of push rule evaluation would be: base rules for before +"override" kind, user defined "override" rules, base rules after "override" +kind, etc, etc. +""" + +import itertools +from typing import Dict, Iterator, List, Mapping, Sequence, Tuple, Union + +import attr + +from synapse.config.experimental import ExperimentalConfig +from synapse.push.rulekinds import PRIORITY_CLASS_MAP + + +@attr.s(auto_attribs=True, slots=True, frozen=True) +class PushRule: + """A push rule + + Attributes: + rule_id: a unique ID for this rule + priority_class: what "kind" of push rule this is (see + `PRIORITY_CLASS_MAP` for mapping between int and kind) + conditions: the sequence of conditions that all need to match + actions: the actions to apply if all conditions are met + default: is this a base rule? + default_enabled: is this enabled by default? + """ + rule_id: str + priority_class: int + conditions: Sequence[Mapping[str, str]] + actions: Sequence[Union[str, Mapping]] + default: bool = False + default_enabled: bool = True -def list_with_base_rules(rawrules: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Combine the list of rules set by the user with the default push rules - Args: - rawrules: The rules the user has modified or set. +@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False) +class PushRules: + """A collection of push rules for an account. - Returns: - A new list with the rules set by the user combined with the defaults. + Can be iterated over, producing push rules in priority order. """ - ruleslist = [] - # Grab the base rules that the user has modified. - # The modified base rules have a priority_class of -1. - modified_base_rules = {r["rule_id"]: r for r in rawrules if r["priority_class"] < 0} + # A mapping from rule ID to push rule that overrides a base rule. These will + # be returned instead of the base rule. + overriden_base_rules: Dict[str, PushRule] = attr.Factory(dict) + + # The following stores the custom push rules at each priority class. + # + # We keep these separate (rather than combining into one big list) to avoid + # copying the base rules around all the time. + override: List[PushRule] = attr.Factory(list) + content: List[PushRule] = attr.Factory(list) + room: List[PushRule] = attr.Factory(list) + sender: List[PushRule] = attr.Factory(list) + underride: List[PushRule] = attr.Factory(list) + + def __iter__(self) -> Iterator[PushRule]: + # When iterating over the push rules we need to return the base rules + # interspersed at the correct spots. + for rule in itertools.chain( + BASE_PREPEND_OVERRIDE_RULES, + self.override, + BASE_APPEND_OVERRIDE_RULES, + self.content, + BASE_APPEND_CONTENT_RULES, + self.room, + self.sender, + self.underride, + BASE_APPEND_UNDERRIDE_RULES, + ): + # Check if a base rule has been overriden by a custom rule. If so + # return that instead. 
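            # (For example, a user who has customised the actions of
            # ".m.rule.master" has that customised copy stored in
            # `overriden_base_rules`; yielding it here, in the base rule's
            # slot, keeps the overall evaluation order intact.)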
+ override_rule = self.overriden_base_rules.get(rule.rule_id) + if override_rule: + yield override_rule + else: + yield rule + + def __len__(self) -> int: + # The length is mostly used by caches to get a sense of "size" / amount + # of memory this object is using, so we only count the number of custom + # rules. + return ( + len(self.overriden_base_rules) + + len(self.override) + + len(self.content) + + len(self.room) + + len(self.sender) + + len(self.underride) + ) - # Remove the modified base rules from the list, They'll be added back - # in the default positions in the list. - rawrules = [r for r in rawrules if r["priority_class"] >= 0] - # shove the server default rules for each kind onto the end of each - current_prio_class = list(PRIORITY_CLASS_INVERSE_MAP)[-1] +@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False) +class FilteredPushRules: + """A wrapper around `PushRules` that filters out disabled experimental push + rules, and includes the "enabled" state for each rule when iterated over. + """ - ruleslist.extend( - make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules - ) - ) + push_rules: PushRules + enabled_map: Dict[str, bool] + experimental_config: ExperimentalConfig - for r in rawrules: - if r["priority_class"] < current_prio_class: - while r["priority_class"] < current_prio_class: - ruleslist.extend( - make_base_append_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], - modified_base_rules, - ) - ) - current_prio_class -= 1 - if current_prio_class > 0: - ruleslist.extend( - make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], - modified_base_rules, - ) - ) - - ruleslist.append(r) - - while current_prio_class > 0: - ruleslist.extend( - make_base_append_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules - ) - ) - current_prio_class -= 1 - if current_prio_class > 0: - ruleslist.extend( - make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules - ) - ) + def __iter__(self) -> Iterator[Tuple[PushRule, bool]]: + for rule in self.push_rules: + if not _is_experimental_rule_enabled( + rule.rule_id, self.experimental_config + ): + continue - return ruleslist + enabled = self.enabled_map.get(rule.rule_id, rule.default_enabled) + yield rule, enabled -def make_base_append_rules( - kind: str, modified_base_rules: Dict[str, Dict[str, Any]] -) -> List[Dict[str, Any]]: - rules = [] + def __len__(self) -> int: + return len(self.push_rules) - if kind == "override": - rules = BASE_APPEND_OVERRIDE_RULES - elif kind == "underride": - rules = BASE_APPEND_UNDERRIDE_RULES - elif kind == "content": - rules = BASE_APPEND_CONTENT_RULES - # Copy the rules before modifying them - rules = copy.deepcopy(rules) - for r in rules: - # Only modify the actions, keep the conditions the same. - assert isinstance(r["rule_id"], str) - modified = modified_base_rules.get(r["rule_id"]) - if modified: - r["actions"] = modified["actions"] +DEFAULT_EMPTY_PUSH_RULES = PushRules() - return rules +def compile_push_rules(rawrules: List[PushRule]) -> PushRules: + """Given a set of custom push rules return a `PushRules` instance (which + includes the base rules). + """ + + if not rawrules: + # Fast path to avoid allocating empty lists when there are no custom + # rules for the user. 
+ return DEFAULT_EMPTY_PUSH_RULES -def make_base_prepend_rules( - kind: str, - modified_base_rules: Dict[str, Dict[str, Any]], -) -> List[Dict[str, Any]]: - rules = [] + rules = PushRules() - if kind == "override": - rules = BASE_PREPEND_OVERRIDE_RULES + for rule in rawrules: + # We need to decide which bucket each custom push rule goes into. - # Copy the rules before modifying them - rules = copy.deepcopy(rules) - for r in rules: - # Only modify the actions, keep the conditions the same. - assert isinstance(r["rule_id"], str) - modified = modified_base_rules.get(r["rule_id"]) - if modified: - r["actions"] = modified["actions"] + # If it has the same ID as a base rule then it overrides that... + overriden_base_rule = BASE_RULES_BY_ID.get(rule.rule_id) + if overriden_base_rule: + rules.overriden_base_rules[rule.rule_id] = attr.evolve( + overriden_base_rule, actions=rule.actions + ) + continue + + # ... otherwise it gets added to the appropriate priority class bucket + collection: List[PushRule] + if rule.priority_class == 5: + collection = rules.override + elif rule.priority_class == 4: + collection = rules.content + elif rule.priority_class == 3: + collection = rules.room + elif rule.priority_class == 2: + collection = rules.sender + elif rule.priority_class == 1: + collection = rules.underride + else: + raise Exception(f"Unknown priority class: {rule.priority_class}") + + collection.append(rule) return rules -# We have to annotate these types, otherwise mypy infers them as -# `List[Dict[str, Sequence[Collection[str]]]]`. -BASE_APPEND_CONTENT_RULES: List[Dict[str, Any]] = [ - { - "rule_id": "global/content/.m.rule.contains_user_name", - "conditions": [ +def _is_experimental_rule_enabled( + rule_id: str, experimental_config: ExperimentalConfig +) -> bool: + """Used by `FilteredPushRules` to filter out experimental rules when they + have not been enabled. 
+ """ + if ( + rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl" + and not experimental_config.msc3786_enabled + ): + return False + if ( + rule_id == "global/underride/.org.matrix.msc3772.thread_reply" + and not experimental_config.msc3772_enabled + ): + return False + return True + + +BASE_APPEND_CONTENT_RULES = [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["content"], + rule_id="global/content/.m.rule.contains_user_name", + conditions=[ { "kind": "event_match", "key": "content.body", @@ -143,29 +239,33 @@ BASE_APPEND_CONTENT_RULES: List[Dict[str, Any]] = [ "pattern_type": "user_localpart", } ], - "actions": [ + actions=[ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, ], - } + ) ] -BASE_PREPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ - { - "rule_id": "global/override/.m.rule.master", - "enabled": False, - "conditions": [], - "actions": ["dont_notify"], - } +BASE_PREPEND_OVERRIDE_RULES = [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.master", + default_enabled=False, + conditions=[], + actions=["dont_notify"], + ) ] -BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ - { - "rule_id": "global/override/.m.rule.suppress_notices", - "conditions": [ +BASE_APPEND_OVERRIDE_RULES = [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.suppress_notices", + conditions=[ { "kind": "event_match", "key": "content.msgtype", @@ -173,13 +273,15 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_suppress_notices", } ], - "actions": ["dont_notify"], - }, + actions=["dont_notify"], + ), # NB. .m.rule.invite_for_me must be higher prio than .m.rule.member_event # otherwise invites will be matched by .m.rule.member_event - { - "rule_id": "global/override/.m.rule.invite_for_me", - "conditions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.invite_for_me", + conditions=[ { "kind": "event_match", "key": "type", @@ -195,21 +297,23 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ # Match the requester's MXID. {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"}, ], - "actions": [ + actions=[ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight", "value": False}, ], - }, + ), # Will we sometimes want to know about people joining and leaving? # Perhaps: if so, this could be expanded upon. Seems the most usual case # is that we don't though. We add this override rule so that even if # the room rule is set to notify, we don't get notifications about # join/leave/avatar/displayname events. # See also: https://matrix.org/jira/browse/SYN-607 - { - "rule_id": "global/override/.m.rule.member_event", - "conditions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.member_event", + conditions=[ { "kind": "event_match", "key": "type", @@ -217,24 +321,28 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_member", } ], - "actions": ["dont_notify"], - }, + actions=["dont_notify"], + ), # This was changed from underride to override so it's closer in priority # to the content rules where the user name highlight rule lives. This # way a room rule is lower priority than both but a custom override rule # is higher priority than both. 
- { - "rule_id": "global/override/.m.rule.contains_display_name", - "conditions": [{"kind": "contains_display_name"}], - "actions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.contains_display_name", + conditions=[{"kind": "contains_display_name"}], + actions=[ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, ], - }, - { - "rule_id": "global/override/.m.rule.roomnotif", - "conditions": [ + ), + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.roomnotif", + conditions=[ { "kind": "event_match", "key": "content.body", @@ -247,11 +355,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_roomnotif_pl", }, ], - "actions": ["notify", {"set_tweak": "highlight", "value": True}], - }, - { - "rule_id": "global/override/.m.rule.tombstone", - "conditions": [ + actions=["notify", {"set_tweak": "highlight", "value": True}], + ), + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.tombstone", + conditions=[ { "kind": "event_match", "key": "type", @@ -265,11 +375,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_tombstone_statekey", }, ], - "actions": ["notify", {"set_tweak": "highlight", "value": True}], - }, - { - "rule_id": "global/override/.m.rule.reaction", - "conditions": [ + actions=["notify", {"set_tweak": "highlight", "value": True}], + ), + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.m.rule.reaction", + conditions=[ { "kind": "event_match", "key": "type", @@ -277,14 +389,16 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_reaction", } ], - "actions": ["dont_notify"], - }, + actions=["dont_notify"], + ), # XXX: This is an experimental rule that is only enabled if msc3786_enabled # is enabled, if it is not the rule gets filtered out in _load_rules() in # PushRulesWorkerStore - { - "rule_id": "global/override/.org.matrix.msc3786.rule.room.server_acl", - "conditions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["override"], + rule_id="global/override/.org.matrix.msc3786.rule.room.server_acl", + conditions=[ { "kind": "event_match", "key": "type", @@ -298,15 +412,17 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_room_server_acl_state_key", }, ], - "actions": [], - }, + actions=[], + ), ] -BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ - { - "rule_id": "global/underride/.m.rule.call", - "conditions": [ +BASE_APPEND_UNDERRIDE_RULES = [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.m.rule.call", + conditions=[ { "kind": "event_match", "key": "type", @@ -314,17 +430,19 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_call", } ], - "actions": [ + actions=[ "notify", {"set_tweak": "sound", "value": "ring"}, {"set_tweak": "highlight", "value": False}, ], - }, + ), # XXX: once m.direct is standardised everywhere, we should use it to detect # a DM from the user's perspective rather than this heuristic. 
- { - "rule_id": "global/underride/.m.rule.room_one_to_one", - "conditions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.m.rule.room_one_to_one", + conditions=[ {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", @@ -333,17 +451,19 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_message", }, ], - "actions": [ + actions=[ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight", "value": False}, ], - }, + ), # XXX: this is going to fire for events which aren't m.room.messages # but are encrypted (e.g. m.call.*)... - { - "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one", - "conditions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.m.rule.encrypted_room_one_to_one", + conditions=[ {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", @@ -352,15 +472,17 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_encrypted", }, ], - "actions": [ + actions=[ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight", "value": False}, ], - }, - { - "rule_id": "global/underride/.org.matrix.msc3772.thread_reply", - "conditions": [ + ), + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.org.matrix.msc3772.thread_reply", + conditions=[ { "kind": "org.matrix.msc3772.relation_match", "rel_type": "m.thread", @@ -368,11 +490,13 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "sender_type": "user_id", } ], - "actions": ["notify", {"set_tweak": "highlight", "value": False}], - }, - { - "rule_id": "global/underride/.m.rule.message", - "conditions": [ + actions=["notify", {"set_tweak": "highlight", "value": False}], + ), + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.m.rule.message", + conditions=[ { "kind": "event_match", "key": "type", @@ -380,13 +504,15 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_message", } ], - "actions": ["notify", {"set_tweak": "highlight", "value": False}], - }, + actions=["notify", {"set_tweak": "highlight", "value": False}], + ), # XXX: this is going to fire for events which aren't m.room.messages # but are encrypted (e.g. m.call.*)... 
- { - "rule_id": "global/underride/.m.rule.encrypted", - "conditions": [ + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.m.rule.encrypted", + conditions=[ { "kind": "event_match", "key": "type", @@ -394,11 +520,13 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_encrypted", } ], - "actions": ["notify", {"set_tweak": "highlight", "value": False}], - }, - { - "rule_id": "global/underride/.im.vector.jitsi", - "conditions": [ + actions=["notify", {"set_tweak": "highlight", "value": False}], + ), + PushRule( + default=True, + priority_class=PRIORITY_CLASS_MAP["underride"], + rule_id="global/underride/.im.vector.jitsi", + conditions=[ { "kind": "event_match", "key": "type", @@ -418,29 +546,27 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "_cache_key": "_is_state_event", }, ], - "actions": ["notify", {"set_tweak": "highlight", "value": False}], - }, + actions=["notify", {"set_tweak": "highlight", "value": False}], + ), ] BASE_RULE_IDS = set() +BASE_RULES_BY_ID: Dict[str, PushRule] = {} + for r in BASE_APPEND_CONTENT_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["content"] - r["default"] = True - BASE_RULE_IDS.add(r["rule_id"]) + BASE_RULE_IDS.add(r.rule_id) + BASE_RULES_BY_ID[r.rule_id] = r for r in BASE_PREPEND_OVERRIDE_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["override"] - r["default"] = True - BASE_RULE_IDS.add(r["rule_id"]) + BASE_RULE_IDS.add(r.rule_id) + BASE_RULES_BY_ID[r.rule_id] = r for r in BASE_APPEND_OVERRIDE_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["override"] - r["default"] = True - BASE_RULE_IDS.add(r["rule_id"]) + BASE_RULE_IDS.add(r.rule_id) + BASE_RULES_BY_ID[r.rule_id] = r for r in BASE_APPEND_UNDERRIDE_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["underride"] - r["default"] = True - BASE_RULE_IDS.add(r["rule_id"]) + BASE_RULE_IDS.add(r.rule_id) + BASE_RULES_BY_ID[r.rule_id] = r diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 713dcf6950..ccd512be54 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -15,7 +15,18 @@ import itertools import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + Union, +) from prometheus_client import Counter @@ -30,6 +41,7 @@ from synapse.util.caches import register_cache from synapse.util.metrics import measure_func from synapse.visibility import filter_event_for_clients_with_state +from .baserules import FilteredPushRules, PushRule from .push_rule_evaluator import PushRuleEvaluatorForEvent if TYPE_CHECKING: @@ -112,7 +124,7 @@ class BulkPushRuleEvaluator: async def _get_rules_for_event( self, event: EventBase, - ) -> Dict[str, List[Dict[str, Any]]]: + ) -> Dict[str, FilteredPushRules]: """Get the push rules for all users who may need to be notified about the event. @@ -186,7 +198,7 @@ class BulkPushRuleEvaluator: return pl_event.content if pl_event else {}, sender_level async def _get_mutual_relations( - self, event: EventBase, rules: Iterable[Dict[str, Any]] + self, event: EventBase, rules: Iterable[Tuple[PushRule, bool]] ) -> Dict[str, Set[Tuple[str, str]]]: """ Fetch event metadata for events which related to the same event as the given event. 
@@ -216,12 +228,11 @@ class BulkPushRuleEvaluator: # Pre-filter to figure out which relation types are interesting. rel_types = set() - for rule in rules: - # Skip disabled rules. - if "enabled" in rule and not rule["enabled"]: + for rule, enabled in rules: + if not enabled: continue - for condition in rule["conditions"]: + for condition in rule.conditions: if condition["kind"] != "org.matrix.msc3772.relation_match": continue @@ -254,7 +265,7 @@ class BulkPushRuleEvaluator: count_as_unread = _should_count_as_unread(event, context) rules_by_user = await self._get_rules_for_event(event) - actions_by_user: Dict[str, List[Union[dict, str]]] = {} + actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {} room_member_count = await self.store.get_number_joined_users_in_room( event.room_id @@ -317,15 +328,13 @@ class BulkPushRuleEvaluator: # current user, it'll be added to the dict later. actions_by_user[uid] = [] - for rule in rules: - if "enabled" in rule and not rule["enabled"]: + for rule, enabled in rules: + if not enabled: continue - matches = evaluator.check_conditions( - rule["conditions"], uid, display_name - ) + matches = evaluator.check_conditions(rule.conditions, uid, display_name) if matches: - actions = [x for x in rule["actions"] if x != "dont_notify"] + actions = [x for x in rule.actions if x != "dont_notify"] if actions and "notify" in actions: # Push rules say we should notify the user of this event actions_by_user[uid] = actions diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 5117ef6854..73618d9234 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -18,16 +18,15 @@ from typing import Any, Dict, List, Optional from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP from synapse.types import UserID +from .baserules import FilteredPushRules, PushRule + def format_push_rules_for_user( - user: UserID, ruleslist: List + user: UserID, ruleslist: FilteredPushRules ) -> Dict[str, Dict[str, list]]: """Converts a list of rawrules and a enabled map into nested dictionaries to match the Matrix client-server format for push rules""" - # We're going to be mutating this a lot, so do a deep copy - ruleslist = copy.deepcopy(ruleslist) - rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = { "global": {}, "device": {}, @@ -35,11 +34,30 @@ def format_push_rules_for_user( rules["global"] = _add_empty_priority_class_arrays(rules["global"]) - for r in ruleslist: - template_name = _priority_class_to_template_name(r["priority_class"]) + for r, enabled in ruleslist: + template_name = _priority_class_to_template_name(r.priority_class) + + rulearray = rules["global"][template_name] + + template_rule = _rule_to_template(r) + if not template_rule: + continue + + rulearray.append(template_rule) + + template_rule["enabled"] = enabled + + if "conditions" not in template_rule: + # Not all formatted rules have explicit conditions, e.g. "room" + # rules omit them as they can be derived from the kind and rule ID. + # + # If the formatted rule has no conditions then we can skip the + # formatting of conditions. + continue # Remove internal stuff. 
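    # ("Internal stuff" here means server-side annotations such as `_cache_key`
    # and the `pattern_type`/`sender_type` placeholders, which are substituted
    # per-user below and must never be exposed to clients.)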
- for c in r["conditions"]: + template_rule["conditions"] = copy.deepcopy(template_rule["conditions"]) + for c in template_rule["conditions"]: c.pop("_cache_key", None) pattern_type = c.pop("pattern_type", None) @@ -52,16 +70,6 @@ def format_push_rules_for_user( if sender_type == "user_id": c["sender"] = user.to_string() - rulearray = rules["global"][template_name] - - template_rule = _rule_to_template(r) - if template_rule: - if "enabled" in r: - template_rule["enabled"] = r["enabled"] - else: - template_rule["enabled"] = True - rulearray.append(template_rule) - return rules @@ -71,24 +79,24 @@ def _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]: return d -def _rule_to_template(rule: Dict[str, Any]) -> Optional[Dict[str, Any]]: - unscoped_rule_id = None - if "rule_id" in rule: - unscoped_rule_id = _rule_id_from_namespaced(rule["rule_id"]) +def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]: + templaterule: Dict[str, Any] + + unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id) - template_name = _priority_class_to_template_name(rule["priority_class"]) + template_name = _priority_class_to_template_name(rule.priority_class) if template_name in ["override", "underride"]: - templaterule = {k: rule[k] for k in ["conditions", "actions"]} + templaterule = {"conditions": rule.conditions, "actions": rule.actions} elif template_name in ["sender", "room"]: - templaterule = {"actions": rule["actions"]} - unscoped_rule_id = rule["conditions"][0]["pattern"] + templaterule = {"actions": rule.actions} + unscoped_rule_id = rule.conditions[0]["pattern"] elif template_name == "content": - if len(rule["conditions"]) != 1: + if len(rule.conditions) != 1: return None - thecond = rule["conditions"][0] + thecond = rule.conditions[0] if "pattern" not in thecond: return None - templaterule = {"actions": rule["actions"]} + templaterule = {"actions": rule.actions} templaterule["pattern"] = thecond["pattern"] else: # This should not be reached unless this function is not kept in sync @@ -97,8 +105,8 @@ def _rule_to_template(rule: Dict[str, Any]) -> Optional[Dict[str, Any]]: if unscoped_rule_id: templaterule["rule_id"] = unscoped_rule_id - if "default" in rule: - templaterule["default"] = rule["default"] + if rule.default: + templaterule["default"] = True return templaterule diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 2e8a017add..3c5632cd91 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -15,7 +15,18 @@ import logging import re -from typing import Any, Dict, List, Mapping, Optional, Pattern, Set, Tuple, Union +from typing import ( + Any, + Dict, + List, + Mapping, + Optional, + Pattern, + Sequence, + Set, + Tuple, + Union, +) from matrix_common.regex import glob_to_regex, to_word_pattern @@ -32,14 +43,14 @@ INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") def _room_member_count( - ev: EventBase, condition: Dict[str, Any], room_member_count: int + ev: EventBase, condition: Mapping[str, Any], room_member_count: int ) -> bool: return _test_ineq_condition(condition, room_member_count) def _sender_notification_permission( ev: EventBase, - condition: Dict[str, Any], + condition: Mapping[str, Any], sender_power_level: int, power_levels: Dict[str, Union[int, Dict[str, int]]], ) -> bool: @@ -54,7 +65,7 @@ def _sender_notification_permission( return sender_power_level >= room_notif_level -def _test_ineq_condition(condition: Dict[str, Any], number: int) -> bool: +def 
_test_ineq_condition(condition: Mapping[str, Any], number: int) -> bool: if "is" not in condition: return False m = INEQUALITY_EXPR.match(condition["is"]) @@ -137,7 +148,7 @@ class PushRuleEvaluatorForEvent: self._condition_cache: Dict[str, bool] = {} def check_conditions( - self, conditions: List[dict], uid: str, display_name: Optional[str] + self, conditions: Sequence[Mapping], uid: str, display_name: Optional[str] ) -> bool: """ Returns true if a user's conditions/user ID/display name match the event. @@ -169,7 +180,7 @@ class PushRuleEvaluatorForEvent: return True def matches( - self, condition: Dict[str, Any], user_id: str, display_name: Optional[str] + self, condition: Mapping[str, Any], user_id: str, display_name: Optional[str] ) -> bool: """ Returns true if a user's condition/user ID/display name match the event. @@ -204,7 +215,7 @@ class PushRuleEvaluatorForEvent: # endpoint with an unknown kind, see _rule_tuple_from_request_object. return True - def _event_match(self, condition: dict, user_id: str) -> bool: + def _event_match(self, condition: Mapping, user_id: str) -> bool: """ Check an "event_match" push rule condition. @@ -269,7 +280,7 @@ class PushRuleEvaluatorForEvent: return bool(r.search(body)) - def _relation_match(self, condition: dict, user_id: str) -> bool: + def _relation_match(self, condition: Mapping, user_id: str) -> bool: """ Check an "relation_match" push rule condition. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index f62aa45ca1..eabf9c9739 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -74,7 +74,17 @@ receipt. """ import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + List, + Mapping, + Optional, + Tuple, + Union, + cast, +) import attr @@ -154,7 +164,9 @@ class NotifCounts: highlight_count: int = 0 -def _serialize_action(actions: List[Union[dict, str]], is_highlight: bool) -> str: +def _serialize_action( + actions: Collection[Union[Mapping, str]], is_highlight: bool +) -> str: """Custom serializer for actions. This allows us to "compress" common actions. We use the fact that most users have the same actions for notifs (and for @@ -750,7 +762,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas async def add_push_actions_to_staging( self, event_id: str, - user_id_actions: Dict[str, List[Union[dict, str]]], + user_id_actions: Dict[str, Collection[Union[Mapping, str]]], count_as_unread: bool, ) -> None: """Add the push actions for the event to the push action staging area. @@ -767,7 +779,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # This is a helper function for generating the necessary tuple that # can be used to insert into the `event_push_actions_staging` table. 
def _gen_entry( - user_id: str, actions: List[Union[dict, str]] + user_id: str, actions: Collection[Union[Mapping, str]] ) -> Tuple[str, str, str, int, int, int]: is_highlight = 1 if _action_has_highlight(actions) else 0 notif = 1 if "notify" in actions else 0 @@ -1410,7 +1422,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): ] -def _action_has_highlight(actions: List[Union[dict, str]]) -> bool: +def _action_has_highlight(actions: Collection[Union[Mapping, str]]) -> bool: for action in actions: if not isinstance(action, dict): continue diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 768f95d16c..255620f996 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -14,11 +14,23 @@ # limitations under the License. import abc import logging -from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Any, + Collection, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, + cast, +) from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig -from synapse.push.baserules import list_with_base_rules +from synapse.push.baserules import FilteredPushRules, PushRule, compile_push_rules from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -50,60 +62,30 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -def _is_experimental_rule_enabled( - rule_id: str, experimental_config: ExperimentalConfig -) -> bool: - """Used by `_load_rules` to filter out experimental rules when they - have not been enabled. - """ - if ( - rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl" - and not experimental_config.msc3786_enabled - ): - return False - if ( - rule_id == "global/underride/.org.matrix.msc3772.thread_reply" - and not experimental_config.msc3772_enabled - ): - return False - return True - - def _load_rules( rawrules: List[JsonDict], enabled_map: Dict[str, bool], experimental_config: ExperimentalConfig, -) -> List[JsonDict]: - ruleslist = [] - for rawrule in rawrules: - rule = dict(rawrule) - rule["conditions"] = db_to_json(rawrule["conditions"]) - rule["actions"] = db_to_json(rawrule["actions"]) - rule["default"] = False - ruleslist.append(rule) - - # We're going to be mutating this a lot, so copy it. We also filter out - # any experimental default push rules that aren't enabled. - rules = [ - rule - for rule in list_with_base_rules(ruleslist) - if _is_experimental_rule_enabled(rule["rule_id"], experimental_config) - ] +) -> FilteredPushRules: + """Take the DB rows returned from the DB and convert them into a full + `FilteredPushRules` object. + """ - for i, rule in enumerate(rules): - rule_id = rule["rule_id"] + ruleslist = [ + PushRule( + rule_id=rawrule["rule_id"], + priority_class=rawrule["priority_class"], + conditions=db_to_json(rawrule["conditions"]), + actions=db_to_json(rawrule["actions"]), + ) + for rawrule in rawrules + ] - if rule_id not in enabled_map: - continue - if rule.get("enabled", True) == bool(enabled_map[rule_id]): - continue + push_rules = compile_push_rules(ruleslist) - # Rules are cached across users. 
- rule = dict(rule) - rule["enabled"] = bool(enabled_map[rule_id]) - rules[i] = rule + filtered_rules = FilteredPushRules(push_rules, enabled_map, experimental_config) - return rules + return filtered_rules # The ABCMeta metaclass ensures that it cannot be instantiated without @@ -162,7 +144,7 @@ class PushRulesWorkerStore( raise NotImplementedError() @cached(max_entries=5000) - async def get_push_rules_for_user(self, user_id: str) -> List[JsonDict]: + async def get_push_rules_for_user(self, user_id: str) -> FilteredPushRules: rows = await self.db_pool.simple_select_list( table="push_rules", keyvalues={"user_name": user_id}, @@ -216,11 +198,11 @@ class PushRulesWorkerStore( @cachedList(cached_method_name="get_push_rules_for_user", list_name="user_ids") async def bulk_get_push_rules( self, user_ids: Collection[str] - ) -> Dict[str, List[JsonDict]]: + ) -> Dict[str, FilteredPushRules]: if not user_ids: return {} - results: Dict[str, List[JsonDict]] = {user_id: [] for user_id in user_ids} + raw_rules: Dict[str, List[JsonDict]] = {user_id: [] for user_id in user_ids} rows = await self.db_pool.simple_select_many_batch( table="push_rules", @@ -234,11 +216,13 @@ class PushRulesWorkerStore( rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))) for row in rows: - results.setdefault(row["user_name"], []).append(row) + raw_rules.setdefault(row["user_name"], []).append(row) enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids) - for user_id, rules in results.items(): + results: Dict[str, FilteredPushRules] = {} + + for user_id, rules in raw_rules.items(): results[user_id] = _load_rules( rules, enabled_map_by_user.get(user_id, {}), self.hs.config.experimental ) @@ -345,8 +329,8 @@ class PushRuleStore(PushRulesWorkerStore): user_id: str, rule_id: str, priority_class: int, - conditions: List[Dict[str, str]], - actions: List[Union[JsonDict, str]], + conditions: Sequence[Mapping[str, str]], + actions: Sequence[Union[Mapping[str, Any], str]], before: Optional[str] = None, after: Optional[str] = None, ) -> None: @@ -817,7 +801,7 @@ class PushRuleStore(PushRulesWorkerStore): return self._push_rules_stream_id_gen.get_current_token() async def copy_push_rule_from_room_to_room( - self, new_room_id: str, user_id: str, rule: dict + self, new_room_id: str, user_id: str, rule: PushRule ) -> None: """Copy a single push rule from one room to another for a specific user. @@ -827,21 +811,27 @@ class PushRuleStore(PushRulesWorkerStore): rule: A push rule. 
""" # Create new rule id - rule_id_scope = "/".join(rule["rule_id"].split("/")[:-1]) + rule_id_scope = "/".join(rule.rule_id.split("/")[:-1]) new_rule_id = rule_id_scope + "/" + new_room_id + new_conditions = [] + # Change room id in each condition - for condition in rule.get("conditions", []): + for condition in rule.conditions: + new_condition = condition if condition.get("key") == "room_id": - condition["pattern"] = new_room_id + new_condition = dict(condition) + new_condition["pattern"] = new_room_id + + new_conditions.append(new_condition) # Add the rule for the new room await self.add_push_rule( user_id=user_id, rule_id=new_rule_id, - priority_class=rule["priority_class"], - conditions=rule["conditions"], - actions=rule["actions"], + priority_class=rule.priority_class, + conditions=new_conditions, + actions=rule.actions, ) async def copy_push_rules_from_room_to_room_for_user( @@ -859,8 +849,11 @@ class PushRuleStore(PushRulesWorkerStore): user_push_rules = await self.get_push_rules_for_user(user_id) # Get rules relating to the old room and copy them to the new room - for rule in user_push_rules: - conditions = rule.get("conditions", []) + for rule, enabled in user_push_rules: + if not enabled: + continue + + conditions = rule.conditions if any( (c.get("key") == "room_id" and c.get("pattern") == old_room_id) for c in conditions diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index ff9f2e8edb..82baa8f154 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import AccountDataTypes +from synapse.push.baserules import PushRule from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest import admin from synapse.rest.client import account, login @@ -130,12 +130,12 @@ class DeactivateAccountTestCase(HomeserverTestCase): ), ) - def _is_custom_rule(self, push_rule: Dict[str, Any]) -> bool: + def _is_custom_rule(self, push_rule: PushRule) -> bool: """ Default rules start with a dot: such as .m.rule and .im.vector. This function returns true iff a rule is custom (not default). """ - return "/." not in push_rule["rule_id"] + return "/." 
not in push_rule.rule_id def test_push_rules_deleted_upon_account_deactivation(self) -> None: """ @@ -157,22 +157,21 @@ class DeactivateAccountTestCase(HomeserverTestCase): ) # Test the rule exists - push_rules = self.get_success(self._store.get_push_rules_for_user(self.user)) + filtered_push_rules = self.get_success( + self._store.get_push_rules_for_user(self.user) + ) # Filter out default rules; we don't care - push_rules = list(filter(self._is_custom_rule, push_rules)) + push_rules = [r for r, _ in filtered_push_rules if self._is_custom_rule(r)] # Check our rule made it self.assertEqual( push_rules, [ - { - "user_name": "@user:test", - "rule_id": "personal.override.rule1", - "priority_class": 5, - "priority": 0, - "conditions": [], - "actions": [], - "default": False, - } + PushRule( + rule_id="personal.override.rule1", + priority_class=5, + conditions=[], + actions=[], + ) ], push_rules, ) @@ -180,9 +179,11 @@ class DeactivateAccountTestCase(HomeserverTestCase): # Request the deactivation of our account self._deactivate_my_account() - push_rules = self.get_success(self._store.get_push_rules_for_user(self.user)) + filtered_push_rules = self.get_success( + self._store.get_push_rules_for_user(self.user) + ) # Filter out default rules; we don't care - push_rules = list(filter(self._is_custom_rule, push_rules)) + push_rules = [r for r, _ in filtered_push_rules if self._is_custom_rule(r)] # Check our rule no longer exists self.assertEqual(push_rules, [], push_rules) -- cgit 1.4.1 From c3516e9decc355b75a297d72a13b98a43d312e66 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 16 Aug 2022 12:16:56 +0000 Subject: Faster room joins: make `/joined_members` block whilst the room is partial stated. (#13514) --- changelog.d/13514.bugfix | 1 + synapse/handlers/message.py | 6 +++++- synapse/storage/controllers/state.py | 13 +++++++++++++ synapse/storage/databases/main/roommember.py | 3 +++ 4 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13514.bugfix diff --git a/changelog.d/13514.bugfix b/changelog.d/13514.bugfix new file mode 100644 index 0000000000..7498af0e47 --- /dev/null +++ b/changelog.d/13514.bugfix @@ -0,0 +1 @@ +Faster room joins: make `/joined_members` block whilst the room is partial stated. \ No newline at end of file diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6b03603598..8f29ee9a87 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -331,7 +331,11 @@ class MessageHandler: msg="Getting joined members while not being a current member of the room is forbidden.", ) - users_with_profile = await self.store.get_users_in_room_with_profiles(room_id) + users_with_profile = ( + await self._state_storage_controller.get_users_in_room_with_profiles( + room_id + ) + ) # If this is an AS, double check that they are allowed to see the members. 
# This can either be because the AS user is in the room or because there diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 0d480f1014..0c78eb735e 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -30,6 +30,7 @@ from typing import ( from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.logging.opentracing import trace +from synapse.storage.roommember import ProfileInfo from synapse.storage.state import StateFilter from synapse.storage.util.partial_state_events_tracker import ( PartialCurrentStateTracker, @@ -506,3 +507,15 @@ class StateStorageController: await self._partial_state_room_tracker.await_full_state(room_id) return await self.stores.main.get_current_hosts_in_room(room_id) + + async def get_users_in_room_with_profiles( + self, room_id: str + ) -> Dict[str, ProfileInfo]: + """ + Get the current users in the room with their profiles. + If the room is currently partial-stated, this will block until the room has + full state. + """ + await self._partial_state_room_tracker.await_full_state(room_id) + + return await self.stores.main.get_users_in_room_with_profiles(room_id) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 93ff4816c8..5e5f607a14 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -283,6 +283,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): Returns: A mapping from user ID to ProfileInfo. + + Preconditions: + - There is full state available for the room (it is not partial-stated). """ def _get_users_in_room_with_profiles( -- cgit 1.4.1 From 14e673ef9db086e5537f3d75a057c7d583dd954b Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 16 Aug 2022 17:20:56 +0200 Subject: Add missing links in `user_consent` section of configuration manual (#13536) Signed-off-by: Dirk Klimpel --- changelog.d/13536.doc | 1 + docs/usage/configuration/config_documentation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13536.doc diff --git a/changelog.d/13536.doc b/changelog.d/13536.doc new file mode 100644 index 0000000000..c8752acb77 --- /dev/null +++ b/changelog.d/13536.doc @@ -0,0 +1 @@ +Add missing links in `user_consent` section of configuration manual. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index d8c29e6063..aa175a0d91 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3352,7 +3352,7 @@ user_directory: For detailed instructions on user consent configuration, see [here](../../consent_tracking.md). Parts of this section are required if enabling the `consent` resource under -`listeners`, in particular `template_dir` and `version`. # TODO: link `listeners` +[`listeners`](#listeners), in particular `template_dir` and `version`. * `template_dir`: gives the location of the templates for the HTML forms. This directory should contain one subdirectory per language (eg, `en`, `fr`), @@ -3364,7 +3364,7 @@ Parts of this section are required if enabling the `consent` resource under parameter. * `server_notice_content`: if enabled, will send a user a "Server Notice" - asking them to consent to the privacy policy. The `server_notices` section ##TODO: link + asking them to consent to the privacy policy. 
The [`server_notices` section](#server_notices) must also be configured for this to work. Notices will *not* be sent to guest users unless `send_server_notice_to_guests` is set to true. -- cgit 1.4.1 From f4ab6a4a96ceb02e260a3d025ff6c1e6cfefe4ed Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:21:55 +0100 Subject: Refuse to start when `faster_joins` is enabled on a worker deployment (#13531) Synapse does not currently support faster room joins on deployments with workers. Signed-off-by: Sean Quah --- changelog.d/13531.misc | 1 + synapse/app/generic_worker.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/13531.misc diff --git a/changelog.d/13531.misc b/changelog.d/13531.misc new file mode 100644 index 0000000000..986122d3d0 --- /dev/null +++ b/changelog.d/13531.misc @@ -0,0 +1 @@ +Faster room joins: Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 42d1f6d219..30e21d9707 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -441,6 +441,13 @@ def start(config_options: List[str]) -> None: "synapse.app.user_dir", ) + if config.experimental.faster_joins_enabled: + raise ConfigError( + "You have enabled the experimental `faster_joins` config option, but it is " + "not compatible with worker deployments yet. Please disable `faster_joins` " + "or run Synapse as a single process deployment instead." + ) + synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage -- cgit 1.4.1 From 738c11729afcb978b7d9551af3c4cf7990163d07 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 16 Aug 2022 16:52:07 +0100 Subject: 1.65.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0bc8f95855..0faf79e9ab 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.65.0 (2022-08-16) +=========================== + +No significant changes since 1.65.0rc2. + + Synapse 1.65.0rc2 (2022-08-11) ============================== diff --git a/debian/changelog b/debian/changelog index 21115d7be2..917249f940 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.65.0) stable; urgency=medium + + * New Synapse release 1.65.0. + + -- Synapse Packaging team Tue, 16 Aug 2022 16:51:26 +0100 + matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium * New Synapse release 1.65.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index a9f59a676f..ecc9d3e878 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.65.0rc2" +version = "1.65.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 06a27338813371bd17a76947f82b3167c484f2ac Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 16 Aug 2022 17:24:53 +0100 Subject: Note explicitly that #11365 was reverted in the 1.65 CHANGELOG --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0faf79e9ab..8a023b769f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -31,7 +31,7 @@ Bugfixes -------- - Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470)) -- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13365](https://github.com/matrix-org/synapse/issues/13365)) +- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365)) - Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374)) - Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392)) - Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408)) -- cgit 1.4.1 From 0a4efbc1ddc3a58a6d75ad5d4d960b9ed367481e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 Aug 2022 12:39:40 -0500 Subject: Instrument the federation/backfill part of `/messages` (#13489) Instrument the federation/backfill part of `/messages` so it's easier to follow what's going on in Jaeger when viewing a trace. 
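For illustration, a minimal sketch (not part of this patch) of the decorator pattern the diff below applies throughout, assuming `trace` and `tag_args` behave as defined in `synapse/logging/opentracing.py`: `@trace` wraps the function in an active span, `@tag_args` records its arguments as `ARG.*` span tags, and interesting intermediate values are tagged under the `RESULT.` prefix.

```python
from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace

@trace      # opens an opentracing span named after the function
@tag_args   # tags the span with ARG.dest, ARG.room_id, ARG.limit
async def backfill_sketch(dest: str, room_id: str, limit: int) -> None:
    # Hypothetical intermediate result, tagged the same way this commit tags
    # extremities_to_request, missing_auth_event_ids, and friends.
    event_ids = ["$a", "$b"]
    set_tag(SynapseTags.RESULT_PREFIX + "event_ids", str(event_ids))
    set_tag(SynapseTags.RESULT_PREFIX + "event_ids.length", str(len(event_ids)))
```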
Split out from https://github.com/matrix-org/synapse/pull/13440 Follow-up from https://github.com/matrix-org/synapse/pull/13368 Part of https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/13489.misc | 1 + synapse/federation/federation_client.py | 27 ++++- synapse/handlers/federation.py | 10 +- synapse/handlers/federation_event.py | 112 ++++++++++++++++++--- synapse/logging/opentracing.py | 19 +++- synapse/storage/controllers/persist_events.py | 30 ++++-- synapse/storage/controllers/state.py | 5 +- synapse/storage/databases/main/event_federation.py | 6 ++ synapse/storage/databases/main/events.py | 2 + synapse/storage/databases/main/events_worker.py | 38 +++++-- .../storage/util/partial_state_events_tracker.py | 3 + 11 files changed, 220 insertions(+), 33 deletions(-) create mode 100644 changelog.d/13489.misc diff --git a/changelog.d/13489.misc b/changelog.d/13489.misc new file mode 100644 index 0000000000..5e4853860e --- /dev/null +++ b/changelog.d/13489.misc @@ -0,0 +1 @@ +Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 54ffbd8170..987f6dad46 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -61,7 +61,7 @@ from synapse.federation.federation_base import ( ) from synapse.federation.transport.client import SendJoinResponse from synapse.http.types import QueryParams -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -235,6 +235,7 @@ class FederationClient(FederationBase): ) @trace + @tag_args async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] ) -> Optional[List[EventBase]]: @@ -337,6 +338,8 @@ class FederationClient(FederationBase): return None + @trace + @tag_args async def get_pdu( self, destinations: Iterable[str], @@ -448,6 +451,8 @@ class FederationClient(FederationBase): return event_copy + @trace + @tag_args async def get_room_state_ids( self, destination: str, room_id: str, event_id: str ) -> Tuple[List[str], List[str]]: @@ -467,6 +472,23 @@ class FederationClient(FederationBase): state_event_ids = result["pdu_ids"] auth_event_ids = result.get("auth_chain_ids", []) + set_tag( + SynapseTags.RESULT_PREFIX + "state_event_ids", + str(state_event_ids), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "state_event_ids.length", + str(len(state_event_ids)), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "auth_event_ids", + str(auth_event_ids), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "auth_event_ids.length", + str(len(auth_event_ids)), + ) + if not isinstance(state_event_ids, list) or not isinstance( auth_event_ids, list ): @@ -474,6 +496,8 @@ class FederationClient(FederationBase): return state_event_ids, auth_event_ids + @trace + @tag_args async def get_room_state( self, destination: str, @@ -533,6 +557,7 @@ class FederationClient(FederationBase): return valid_state_events, valid_auth_events + @trace async def _check_sigs_and_hash_and_fetch( self, origin: str, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6f5ab86ac4..d13011d138 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -59,7 +59,7 @@ from 
synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context -from synapse.logging.opentracing import tag_args, trace +from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( @@ -370,6 +370,14 @@ class FederationHandler: logger.debug( "_maybe_backfill_inner: extremities_to_request %s", extremities_to_request ) + set_tag( + SynapseTags.RESULT_PREFIX + "extremities_to_request", + str(extremities_to_request), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "extremities_to_request.length", + str(len(extremities_to_request)), + ) # Now we need to decide which hosts to hit first. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 8968b705d4..dd0d610fe9 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -59,7 +59,13 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_client import InvalidResponseError from synapse.logging.context import nested_logging_context -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import ( + SynapseTags, + set_tag, + start_active_span, + tag_args, + trace, +) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( @@ -410,6 +416,7 @@ class FederationEventHandler: prev_member_event, ) + @trace async def process_remote_join( self, origin: str, @@ -715,7 +722,7 @@ class FederationEventHandler: @trace async def _process_pulled_events( - self, origin: str, events: Iterable[EventBase], backfilled: bool + self, origin: str, events: Collection[EventBase], backfilled: bool ) -> None: """Process a batch of events we have pulled from a remote server @@ -730,6 +737,15 @@ class FederationEventHandler: backfilled: True if this is part of a historical batch of events (inhibits notification to clients, and validation of device keys.) 
""" + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids", + str([event.event_id for event in events]), + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(events)), + ) + set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) logger.debug( "processing pulled backfilled=%s events=%s", backfilled, @@ -753,6 +769,7 @@ class FederationEventHandler: await self._process_pulled_event(origin, ev, backfilled=backfilled) @trace + @tag_args async def _process_pulled_event( self, origin: str, event: EventBase, backfilled: bool ) -> None: @@ -854,6 +871,7 @@ class FederationEventHandler: else: raise + @trace async def _compute_event_context_with_maybe_missing_prevs( self, dest: str, event: EventBase ) -> EventContext: @@ -970,6 +988,8 @@ class FederationEventHandler: event, state_ids_before_event=state_map, partial_state=partial_state ) + @trace + @tag_args async def _get_state_ids_after_missing_prev_event( self, destination: str, @@ -1009,10 +1029,10 @@ class FederationEventHandler: logger.debug("Fetching %i events from cache/store", len(desired_events)) have_events = await self._store.have_seen_events(room_id, desired_events) - missing_desired_events = desired_events - have_events + missing_desired_event_ids = desired_events - have_events logger.debug( "We are missing %i events (got %i)", - len(missing_desired_events), + len(missing_desired_event_ids), len(have_events), ) @@ -1024,13 +1044,30 @@ class FederationEventHandler: # already have a bunch of the state events. It would be nice if the # federation api gave us a way of finding out which we actually need. - missing_auth_events = set(auth_event_ids) - have_events - missing_auth_events.difference_update( - await self._store.have_seen_events(room_id, missing_auth_events) + missing_auth_event_ids = set(auth_event_ids) - have_events + missing_auth_event_ids.difference_update( + await self._store.have_seen_events(room_id, missing_auth_event_ids) ) - logger.debug("We are also missing %i auth events", len(missing_auth_events)) + logger.debug("We are also missing %i auth events", len(missing_auth_event_ids)) - missing_events = missing_desired_events | missing_auth_events + missing_event_ids = missing_desired_event_ids | missing_auth_event_ids + + set_tag( + SynapseTags.RESULT_PREFIX + "missing_auth_event_ids", + str(missing_auth_event_ids), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "missing_auth_event_ids.length", + str(len(missing_auth_event_ids)), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "missing_desired_event_ids", + str(missing_desired_event_ids), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "missing_desired_event_ids.length", + str(len(missing_desired_event_ids)), + ) # Making an individual request for each of 1000s of events has a lot of # overhead. 
On the other hand, we don't really want to fetch all of the events @@ -1041,13 +1078,13 @@ class FederationEventHandler: # # TODO: might it be better to have an API which lets us do an aggregate event # request - if (len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids): + if (len(missing_event_ids) * 10) >= len(auth_event_ids) + len(state_event_ids): logger.debug("Requesting complete state from remote") await self._get_state_and_persist(destination, room_id, event_id) else: - logger.debug("Fetching %i events from remote", len(missing_events)) + logger.debug("Fetching %i events from remote", len(missing_event_ids)) await self._get_events_and_persist( - destination=destination, room_id=room_id, event_ids=missing_events + destination=destination, room_id=room_id, event_ids=missing_event_ids ) # We now need to fill out the state map, which involves fetching the @@ -1104,6 +1141,14 @@ class FederationEventHandler: event_id, failed_to_fetch, ) + set_tag( + SynapseTags.RESULT_PREFIX + "failed_to_fetch", + str(failed_to_fetch), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "failed_to_fetch.length", + str(len(failed_to_fetch)), + ) if remote_event.is_state() and remote_event.rejected_reason is None: state_map[ @@ -1112,6 +1157,8 @@ class FederationEventHandler: return state_map + @trace + @tag_args async def _get_state_and_persist( self, destination: str, room_id: str, event_id: str ) -> None: @@ -1133,6 +1180,7 @@ class FederationEventHandler: destination=destination, room_id=room_id, event_ids=(event_id,) ) + @trace async def _process_received_pdu( self, origin: str, @@ -1283,6 +1331,7 @@ class FederationEventHandler: except Exception: logger.exception("Failed to resync device for %s", sender) + @trace async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None: """Handles backfilling the insertion event when we receive a marker event that points to one. @@ -1414,6 +1463,8 @@ class FederationEventHandler: return event_from_response + @trace + @tag_args async def _get_events_and_persist( self, destination: str, room_id: str, event_ids: Collection[str] ) -> None: @@ -1459,6 +1510,7 @@ class FederationEventHandler: logger.info("Fetched %i events of %i requested", len(events), len(event_ids)) await self._auth_and_persist_outliers(room_id, events) + @trace async def _auth_and_persist_outliers( self, room_id: str, events: Iterable[EventBase] ) -> None: @@ -1477,6 +1529,16 @@ class FederationEventHandler: """ event_map = {event.event_id: event for event in events} + event_ids = event_map.keys() + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids", + str(event_ids), + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + # filter out any events we have already seen. This might happen because # the events were eagerly pushed to us (eg, during a room join), or because # another thread has raced against us since we decided to request the event. @@ -1593,6 +1655,7 @@ class FederationEventHandler: backfilled=True, ) + @trace async def _check_event_auth( self, origin: Optional[str], event: EventBase, context: EventContext ) -> None: @@ -1631,6 +1694,14 @@ class FederationEventHandler: claimed_auth_events = await self._load_or_fetch_auth_events_for_event( origin, event ) + set_tag( + SynapseTags.RESULT_PREFIX + "claimed_auth_events", + str([ev.event_id for ev in claimed_auth_events]), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "claimed_auth_events.length", + str(len(claimed_auth_events)), + ) # ... 
and check that the event passes auth at those auth events. # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu: @@ -1728,6 +1799,7 @@ class FederationEventHandler: ) context.rejected = RejectedReason.AUTH_ERROR + @trace async def _maybe_kick_guest_users(self, event: EventBase) -> None: if event.type != EventTypes.GuestAccess: return @@ -1935,6 +2007,8 @@ class FederationEventHandler: # instead we raise an AuthError, which will make the caller ignore it. raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found") + @trace + @tag_args async def _get_remote_auth_chain_for_event( self, destination: str, room_id: str, event_id: str ) -> None: @@ -1963,6 +2037,7 @@ class FederationEventHandler: await self._auth_and_persist_outliers(room_id, remote_auth_events) + @trace async def _run_push_actions_and_persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False ) -> None: @@ -2071,8 +2146,17 @@ class FederationEventHandler: self._message_handler.maybe_schedule_expiry(event) if not backfilled: # Never notify for backfilled events - for event in events: - await self._notify_persisted_event(event, max_stream_token) + with start_active_span("notify_persisted_events"): + set_tag( + SynapseTags.RESULT_PREFIX + "event_ids", + str([ev.event_id for ev in events]), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "event_ids.length", + str(len(events)), + ) + for event in events: + await self._notify_persisted_event(event, max_stream_token) return max_stream_token.stream diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index d1fa2cf8ae..482316a1ff 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -310,6 +310,19 @@ class SynapseTags: # The name of the external cache CACHE_NAME = "cache.name" + # Used to tag function arguments + # + # Tag a named arg. The name of the argument should be appended to this prefix. + FUNC_ARG_PREFIX = "ARG." + # Tag extra variadic number of positional arguments (`def foo(first, second, *extras)`) + FUNC_ARGS = "args" + # Tag keyword args + FUNC_KWARGS = "kwargs" + + # Some intermediate result that's interesting to the function. The label for + # the result should be appended to this prefix. + RESULT_PREFIX = "RESULT." + class SynapseBaggage: FORCE_TRACING = "synapse-force-tracing" @@ -967,9 +980,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: # first argument only if it's named `self` or `cls`. This isn't fool-proof # but handles the idiomatic cases. 
for i, arg in enumerate(args[1:], start=1): # type: ignore[index] - set_tag("ARG_" + argspec.args[i], str(arg)) - set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] - set_tag("kwargs", str(kwargs)) + set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg)) + set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) # type: ignore[index] + set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) yield return _custom_sync_async_decorator(func, _wrapping_logic) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index cf98b0ab48..dad3731b9b 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -45,8 +45,14 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.logging import opentracing from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.logging.opentracing import ( + SynapseTags, + active_span, + set_tag, + start_active_span_follows_from, + trace, +) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.controllers.state import StateStorageController from synapse.storage.databases import Databases @@ -223,7 +229,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]): queue.append(end_item) # also add our active opentracing span to the item so that we get a link back - span = opentracing.active_span() + span = active_span() if span: end_item.parent_opentracing_span_contexts.append(span.context) @@ -234,7 +240,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]): res = await make_deferred_yieldable(end_item.deferred.observe()) # add another opentracing span which links to the persist trace. - with opentracing.start_active_span_follows_from( + with start_active_span_follows_from( f"{task.name}_complete", (end_item.opentracing_span_context,) ): pass @@ -266,7 +272,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]): queue = self._get_drainining_queue(room_id) for item in queue: try: - with opentracing.start_active_span_follows_from( + with start_active_span_follows_from( item.task.name, item.parent_opentracing_span_contexts, inherit_force_tracing=True, @@ -355,7 +361,7 @@ class EventsPersistenceStorageController: f"Found an unexpected task type in event persistence queue: {task}" ) - @opentracing.trace + @trace async def persist_events( self, events_and_contexts: Iterable[Tuple[EventBase, EventContext]], @@ -380,9 +386,21 @@ class EventsPersistenceStorageController: PartialStateConflictError: if attempting to persist a partial state event in a room that has been un-partial stated. 
""" + event_ids: List[str] = [] partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {} for event, ctx in events_and_contexts: partitioned.setdefault(event.room_id, []).append((event, ctx)) + event_ids.append(event.event_id) + + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids", + str(event_ids), + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) async def enqueue( item: Tuple[str, List[Tuple[EventBase, EventContext]]] @@ -418,7 +436,7 @@ class EventsPersistenceStorageController: self.main_store.get_room_max_token(), ) - @opentracing.trace + @trace async def persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False ) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]: diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 0c78eb735e..1ad002f57b 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -29,7 +29,7 @@ from typing import ( from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import tag_args, trace from synapse.storage.roommember import ProfileInfo from synapse.storage.state import StateFilter from synapse.storage.util.partial_state_events_tracker import ( @@ -229,6 +229,7 @@ class StateStorageController: return {event: event_to_state[event] for event in event_ids} @trace + @tag_args async def get_state_ids_for_events( self, event_ids: Collection[str], @@ -333,6 +334,7 @@ class StateStorageController: ) @trace + @tag_args async def get_state_group_for_events( self, event_ids: Collection[str], @@ -474,6 +476,7 @@ class StateStorageController: prev_stream_id, max_stream_id ) + @trace async def get_current_state( self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 0bc8401f2b..c836078da6 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -712,6 +712,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # Return all events where not all sets can reach them. 
return {eid for eid, n in event_to_missing_sets.items() if n} + @trace + @tag_args async def get_oldest_event_ids_with_depth_in_room( self, room_id: str ) -> List[Tuple[str, int]]: @@ -770,6 +772,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas room_id, ) + @trace async def get_insertion_event_backward_extremities_in_room( self, room_id: str ) -> List[Tuple[str, int]]: @@ -1342,6 +1345,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas event_results.reverse() return event_results + @trace + @tag_args async def get_successor_events(self, event_id: str) -> List[str]: """Fetch all events that have the given event as a prev event @@ -1378,6 +1383,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas _delete_old_forward_extrem_cache_txn, ) + @trace async def insert_insertion_extremity(self, event_id: str, room_id: str) -> None: await self.db_pool.simple_upsert( table="insertion_event_extremities", diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 5560b38a48..a4010ee28d 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -40,6 +40,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext +from synapse.logging.opentracing import trace from synapse.storage._base import db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -145,6 +146,7 @@ class PersistEventsStore: self._backfill_id_gen: AbstractStreamIdGenerator = self.store._backfill_id_gen self._stream_id_gen: AbstractStreamIdGenerator = self.store._stream_id_gen + @trace async def _persist_events_and_state_updates( self, events_and_contexts: List[Tuple[EventBase, EventContext]], diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b07d812ae2..8a7cdb024d 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -54,6 +54,7 @@ from synapse.logging.context import ( current_context, make_deferred_yieldable, ) +from synapse.logging.opentracing import start_active_span, tag_args, trace from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, @@ -430,6 +431,8 @@ class EventsWorkerStore(SQLBaseStore): return {e.event_id: e for e in events} + @trace + @tag_args async def get_events_as_list( self, event_ids: Collection[str], @@ -1090,23 +1093,42 @@ class EventsWorkerStore(SQLBaseStore): """ fetched_event_ids: Set[str] = set() fetched_events: Dict[str, _EventRow] = {} - events_to_fetch = event_ids - while events_to_fetch: - row_map = await self._enqueue_events(events_to_fetch) + async def _fetch_event_ids_and_get_outstanding_redactions( + event_ids_to_fetch: Collection[str], + ) -> Collection[str]: + """ + Fetch all of the given event_ids and return any associated redaction event_ids + that we still need to fetch in the next iteration. 
+ """ + row_map = await self._enqueue_events(event_ids_to_fetch) # we need to recursively fetch any redactions of those events redaction_ids: Set[str] = set() - for event_id in events_to_fetch: + for event_id in event_ids_to_fetch: row = row_map.get(event_id) fetched_event_ids.add(event_id) if row: fetched_events[event_id] = row redaction_ids.update(row.redactions) - events_to_fetch = redaction_ids.difference(fetched_event_ids) - if events_to_fetch: - logger.debug("Also fetching redaction events %s", events_to_fetch) + event_ids_to_fetch = redaction_ids.difference(fetched_event_ids) + return event_ids_to_fetch + + # Grab the initial list of events requested + event_ids_to_fetch = await _fetch_event_ids_and_get_outstanding_redactions( + event_ids + ) + # Then go and recursively find all of the associated redactions + with start_active_span("recursively fetching redactions"): + while event_ids_to_fetch: + logger.debug("Also fetching redaction events %s", event_ids_to_fetch) + + event_ids_to_fetch = ( + await _fetch_event_ids_and_get_outstanding_redactions( + event_ids_to_fetch + ) + ) # build a map from event_id to EventBase event_map: Dict[str, EventBase] = {} @@ -1424,6 +1446,8 @@ class EventsWorkerStore(SQLBaseStore): return {r["event_id"] for r in rows} + @trace + @tag_args async def have_seen_events( self, room_id: str, event_ids: Iterable[str] ) -> Set[str]: diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py index 466e5137f2..b4bf49dace 100644 --- a/synapse/storage/util/partial_state_events_tracker.py +++ b/synapse/storage/util/partial_state_events_tracker.py @@ -20,6 +20,7 @@ from twisted.internet import defer from twisted.internet.defer import Deferred from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.logging.opentracing import trace_with_opname from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore from synapse.util import unwrapFirstError @@ -58,6 +59,7 @@ class PartialStateEventsTracker: for o in observers: o.callback(None) + @trace_with_opname("PartialStateEventsTracker.await_full_state") async def await_full_state(self, event_ids: Collection[str]) -> None: """Wait for all the given events to have full state. @@ -151,6 +153,7 @@ class PartialCurrentStateTracker: for o in observers: o.callback(None) + @trace_with_opname("PartialCurrentStateTracker.await_full_state") async def await_full_state(self, room_id: str) -> None: # We add the deferred immediately so that the DB call to check for # partial state doesn't race when we unpartial the room. -- cgit 1.4.1 From 2c8cfd6d85a61e049344e00170119a679570af0f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 Aug 2022 04:19:21 -0500 Subject: Add specific metric to time long-running `/messages` requests (#13533) --- changelog.d/13533.misc | 1 + synapse/rest/client/room.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 changelog.d/13533.misc diff --git a/changelog.d/13533.misc b/changelog.d/13533.misc new file mode 100644 index 0000000000..ab4b18887a --- /dev/null +++ b/changelog.d/13533.misc @@ -0,0 +1 @@ +Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). 
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 2f513164cb..d29417fafc 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -19,6 +19,8 @@ import re from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple from urllib import parse as urlparse +from prometheus_client.core import Histogram + from twisted.web.server import Request from synapse import event_auth @@ -60,6 +62,35 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# This is an extra metric on top of `synapse_http_server_response_time_seconds` +# which times the same sort of thing but this one allows us to see values +# greater than 10s. We use a separate dedicated histogram with its own buckets +# so that we don't increase the cardinality of the general one because it's +# multiplied across hundreds of servlets. +messsages_response_timer = Histogram( + "synapse_room_message_list_rest_servlet_response_time_seconds", + "sec", + [], + buckets=( + 0.005, + 0.01, + 0.025, + 0.05, + 0.1, + 0.25, + 0.5, + 1.0, + 2.5, + 5.0, + 10.0, + 30.0, + 60.0, + 120.0, + 180.0, + "+Inf", + ), +) + class TransactionRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): @@ -560,6 +591,7 @@ class RoomMessageListRestServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main + @messsages_response_timer.time() async def on_GET( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: -- cgit 1.4.1 From 088bcb7ecb91df5ca527d096299fbcec4ce5f5fa Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 Aug 2022 04:33:19 -0500 Subject: Time how long it takes us to do backfill processing (#13535) --- changelog.d/13535.misc | 1 + synapse/handlers/federation.py | 56 ++++++++++++++++++++++++++++++++++-- synapse/handlers/federation_event.py | 49 ++++++++++++++++++++++--------- 3 files changed, 90 insertions(+), 16 deletions(-) create mode 100644 changelog.d/13535.misc diff --git a/changelog.d/13535.misc b/changelog.d/13535.misc new file mode 100644 index 0000000000..6b190181c8 --- /dev/null +++ b/changelog.d/13535.misc @@ -0,0 +1 @@ +Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d13011d138..a09eaa4379 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -32,6 +32,7 @@ from typing import ( ) import attr +from prometheus_client import Histogram from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 @@ -79,6 +80,24 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# Added to debug performance and track progress on optimizations +backfill_processing_before_timer = Histogram( + "synapse_federation_backfill_processing_before_time_seconds", + "sec", + [], + buckets=( + 1.0, + 5.0, + 10.0, + 20.0, + 30.0, + 40.0, + 60.0, + 80.0, + "+Inf", + ), +) + def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]: """Get joined domains from state @@ -138,6 +157,7 @@ class FederationHandler: def __init__(self, hs: "HomeServer"): self.hs = hs + self.clock = hs.get_clock() self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -197,12 +217,39 @@ class FederationHandler: return. 
This is used as part of the heuristic to decide if we should back paginate. """ + # Starting the processing time here so we can include the room backfill + # linearizer lock queue in the timing + processing_start_time = self.clock.time_msec() + async with self._room_backfill.queue(room_id): - return await self._maybe_backfill_inner(room_id, current_depth, limit) + return await self._maybe_backfill_inner( + room_id, + current_depth, + limit, + processing_start_time=processing_start_time, + ) async def _maybe_backfill_inner( - self, room_id: str, current_depth: int, limit: int + self, + room_id: str, + current_depth: int, + limit: int, + *, + processing_start_time: int, ) -> bool: + """ + Checks whether the `current_depth` is at or approaching any backfill + points in the room and if so, will backfill. We only care about + checking backfill points that happened before the `current_depth` + (meaning less than or equal to the `current_depth`). + + Args: + room_id: The room to backfill in. + current_depth: The depth to check at for any upcoming backfill points. + limit: The max number of events to request from the remote federated server. + processing_start_time: The time when `maybe_backfill` started + processing. Only used for timing. + """ backwards_extremities = [ _BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY) for event_id, depth in await self.store.get_oldest_event_ids_with_depth_in_room( @@ -433,6 +480,11 @@ class FederationHandler: return False + processing_end_time = self.clock.time_msec() + backfill_processing_before_timer.observe( + (processing_start_time - processing_end_time) / 1000 + ) + success = await try_backfill(likely_domains) if success: return True diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index dd0d610fe9..f40b071a74 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -29,7 +29,7 @@ from typing import ( Tuple, ) -from prometheus_client import Counter +from prometheus_client import Counter, Histogram from synapse import event_auth from synapse.api.constants import ( @@ -98,6 +98,26 @@ soft_failed_event_counter = Counter( "Events received over federation that we marked as soft_failed", ) +# Added to debug performance and track progress on optimizations +backfill_processing_after_timer = Histogram( + "synapse_federation_backfill_processing_after_time_seconds", + "sec", + [], + buckets=( + 1.0, + 5.0, + 10.0, + 20.0, + 30.0, + 40.0, + 60.0, + 80.0, + 120.0, + 180.0, + "+Inf", + ), +) + class FederationEventHandler: """Handles events that originated from federation. @@ -604,20 +624,21 @@ class FederationEventHandler: if not events: return - # if there are any events in the wrong room, the remote server is buggy and - # should not be trusted. - for ev in events: - if ev.room_id != room_id: - raise InvalidResponseError( - f"Remote server {dest} returned event {ev.event_id} which is in " - f"room {ev.room_id}, when we were backfilling in {room_id}" - ) + with backfill_processing_after_timer.time(): + # if there are any events in the wrong room, the remote server is buggy and + # should not be trusted. 
+ for ev in events: + if ev.room_id != room_id: + raise InvalidResponseError( + f"Remote server {dest} returned event {ev.event_id} which is in " + f"room {ev.room_id}, when we were backfilling in {room_id}" + ) - await self._process_pulled_events( - dest, - events, - backfilled=True, - ) + await self._process_pulled_events( + dest, + events, + backfilled=True, + ) @trace async def _get_missing_events_for_pdu( -- cgit 1.4.1 From c6ee9c0ee40803a9e3673c2833e5a40032e86f5a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 Aug 2022 04:38:05 -0500 Subject: Add metrics to track rate limiter queue timing (#13544) --- changelog.d/13544.misc | 1 + synapse/util/ratelimitutils.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 changelog.d/13544.misc diff --git a/changelog.d/13544.misc b/changelog.d/13544.misc new file mode 100644 index 0000000000..d84ba3f076 --- /dev/null +++ b/changelog.d/13544.misc @@ -0,0 +1 @@ +Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index e1beaec5a3..e48324d926 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -28,6 +28,7 @@ from synapse.logging.context import ( run_in_background, ) from synapse.logging.opentracing import start_active_span +from synapse.metrics import Histogram from synapse.util import Clock if typing.TYPE_CHECKING: @@ -36,6 +37,29 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) +queue_wait_timer = Histogram( + "synapse_rate_limit_queue_wait_time_seconds", + "sec", + [], + buckets=( + 0.005, + 0.01, + 0.025, + 0.05, + 0.1, + 0.25, + 0.5, + 0.75, + 1.0, + 2.5, + 5.0, + 10.0, + 20.0, + "+Inf", + ), +) + + class FederationRateLimiter: def __init__(self, clock: Clock, config: FederationRatelimitSettings): def new_limiter() -> "_PerHostRatelimiter": @@ -178,10 +202,16 @@ class _PerHostRatelimiter: self.sleeping_requests.discard(request_id) self.ready_request_queue.pop(request_id, None) wait_span_scope.__exit__(None, None, None) + wait_timer_cm.__exit__(None, None, None) return r + # Tracing wait_span_scope = start_active_span("ratelimit wait") wait_span_scope.__enter__() + # Metrics + wait_timer_cm = queue_wait_timer.time() + wait_timer_cm.__enter__() + ret_defer.addCallbacks(on_start, on_err) ret_defer.addBoth(on_both) return make_deferred_yieldable(ret_defer) -- cgit 1.4.1 From d75512d19ebea6c0f9e38e9f55474fdb6da02b46 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 17 Aug 2022 11:42:01 +0200 Subject: Add forgotten status to Room Details API (#13503) --- changelog.d/13503.feature | 1 + docs/admin_api/rooms.md | 5 +- synapse/rest/admin/rooms.py | 1 + synapse/storage/databases/main/roommember.py | 24 ++++++++++ tests/rest/admin/test_room.py | 1 + tests/storage/test_roommember.py | 70 ++++++++++++++++++++++++++++ 6 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13503.feature diff --git a/changelog.d/13503.feature b/changelog.d/13503.feature new file mode 100644 index 0000000000..4baabd1e32 --- /dev/null +++ b/changelog.d/13503.feature @@ -0,0 +1 @@ +Add forgotten status to Room Details API. 
\ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 9aa489e4a3..ac7c54c20e 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -302,6 +302,8 @@ The following fields are possible in the JSON response body: * `state_events` - Total number of state_events of a room. Complexity of the room. * `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. If the room does not define a type, the value will be `null`. +* `forgotten` - Whether all local users have + [forgotten](https://spec.matrix.org/latest/client-server-api/#leaving-rooms) the room. The API is: @@ -330,7 +332,8 @@ A response body like the following is returned: "guest_access": null, "history_visibility": "shared", "state_events": 93534, - "room_type": "m.space" + "room_type": "m.space", + "forgotten": false } ``` diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 9d953d58de..68054ffc28 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -303,6 +303,7 @@ class RoomRestServlet(RestServlet): members = await self.store.get_users_in_room(room_id) ret["joined_local_devices"] = await self.store.count_devices_by_users(members) + ret["forgotten"] = await self.store.is_locally_forgotten_room(room_id) return HTTPStatus.OK, ret diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 5e5f607a14..827c1f1efd 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1215,6 +1215,30 @@ class RoomMemberWorkerStore(EventsWorkerStore): "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn ) + async def is_locally_forgotten_room(self, room_id: str) -> bool: + """Returns whether all local users have forgotten this room_id. + + Args: + room_id: The room ID to query. + + Returns: + Whether the room is forgotten. + """ + + sql = """ + SELECT count(*) > 0 FROM local_current_membership + INNER JOIN room_memberships USING (room_id, event_id) + WHERE + room_id = ? + AND forgotten = 0; + """ + + rows = await self.db_pool.execute("is_forgotten_room", None, sql, room_id) + + # `count(*)` returns always an integer + # If any rows still exist it means someone has not forgotten this room yet + return not rows[0][0] + async def get_rooms_user_has_been_in(self, user_id: str) -> Set[str]: """Get all rooms that the user has ever been in. 
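To show how the new field surfaces to an operator, a hedged sketch (not part of this patch; the endpoint path follows the Room Details admin API documented above, and the token handling is illustrative):

```python
import requests

def room_is_locally_forgotten(base_url: str, admin_token: str, room_id: str) -> bool:
    """Return True once every local user has forgotten the room."""
    resp = requests.get(
        f"{base_url}/_synapse/admin/v1/rooms/{room_id}",
        headers={"Authorization": f"Bearer {admin_token}"},
    )
    resp.raise_for_status()
    return resp.json()["forgotten"]
```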
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index dd5000679a..fd6da557c1 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1633,6 +1633,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertIn("history_visibility", channel.json_body) self.assertIn("state_events", channel.json_body) self.assertIn("room_type", channel.json_body) + self.assertIn("forgotten", channel.json_body) self.assertEqual(room_id_1, channel.json_body["room_id"]) def test_single_room_devices(self) -> None: diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 240b02cb9f..ceec690285 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -23,6 +23,7 @@ from synapse.util import Clock from tests import unittest from tests.server import TestHomeServer +from tests.test_utils import event_injection class RoomMemberStoreTestCase(unittest.HomeserverTestCase): @@ -157,6 +158,75 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): # Check that alice's display name is now None self.assertEqual(row[0]["display_name"], None) + def test_room_is_locally_forgotten(self): + """Test that when the last local user has forgotten a room it is known as forgotten.""" + # join two local and one remote user + self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) + self.get_success( + event_injection.inject_member_event(self.hs, self.room, self.u_bob, "join") + ) + self.get_success( + event_injection.inject_member_event( + self.hs, self.room, self.u_charlie.to_string(), "join" + ) + ) + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + + # local users leave the room and the room is not forgotten + self.get_success( + event_injection.inject_member_event( + self.hs, self.room, self.u_alice, "leave" + ) + ) + self.get_success( + event_injection.inject_member_event(self.hs, self.room, self.u_bob, "leave") + ) + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + + # first user forgets the room, room is not forgotten + self.get_success(self.store.forget(self.u_alice, self.room)) + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + + # second (last local) user forgets the room and the room is forgotten + self.get_success(self.store.forget(self.u_bob, self.room)) + self.assertTrue( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + + def test_join_locally_forgotten_room(self): + """Tests if a user joins a forgotten room the room is not forgotten anymore.""" + self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + + # after leaving and forget the room, it is forgotten + self.get_success( + event_injection.inject_member_event( + self.hs, self.room, self.u_alice, "leave" + ) + ) + self.get_success(self.store.forget(self.u_alice, self.room)) + self.assertTrue( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + + # after rejoin the room is not forgotten anymore + self.get_success( + event_injection.inject_member_event( + self.hs, self.room, self.u_alice, "join" + ) + ) + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(self.room)) + ) + class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: -- cgit 
1.4.1 From b71b41c7bd0dd7510182621ab4bc8267388c0cce Mon Sep 17 00:00:00 2001 From: Antonin Loubiere <40536532+AntoninLoubiere@users.noreply.github.com> Date: Wed, 17 Aug 2022 11:59:05 +0200 Subject: Fix a typo in docs and in some warnings (#13538) --- changelog.d/13538.doc | 1 + docs/templates.md | 2 +- synapse/config/account_validity.py | 2 +- synapse/config/emailconfig.py | 2 +- synapse/config/sso.py | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13538.doc diff --git a/changelog.d/13538.doc b/changelog.d/13538.doc new file mode 100644 index 0000000000..9215aeac5a --- /dev/null +++ b/changelog.d/13538.doc @@ -0,0 +1 @@ +Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). \ No newline at end of file diff --git a/docs/templates.md b/docs/templates.md index f87692a453..453ac90dd8 100644 --- a/docs/templates.md +++ b/docs/templates.md @@ -9,7 +9,7 @@ in, allowing them to specify custom templates: ```yaml templates: - custom_templates_directory: /path/to/custom/templates/ + custom_template_directory: /path/to/custom/templates/ ``` If this setting is not set, or the files named below are not found within the directory, diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py index d1335e77cd..b3972ede96 100644 --- a/synapse/config/account_validity.py +++ b/synapse/config/account_validity.py @@ -23,7 +23,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """ This server's configuration file is using the deprecated 'template_dir' setting in the 'account_validity' section. Support for this setting has been deprecated and will be removed in a future version of Synapse. Server admins should instead use the new -'custom_templates_directory' setting documented here: +'custom_template_directory' setting documented here: https://matrix-org.github.io/synapse/latest/templates.html ---------------------------------------------------------------------------------------""" diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 7765c5b454..66a6dbf1fe 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -53,7 +53,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """ This server's configuration file is using the deprecated 'template_dir' setting in the 'email' section. Support for this setting has been deprecated and will be removed in a future version of Synapse. Server admins should instead use the new -'custom_templates_directory' setting documented here: +'custom_template_directory' setting documented here: https://matrix-org.github.io/synapse/latest/templates.html ---------------------------------------------------------------------------------------""" diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 2178cbf983..a452cc3a49 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -26,7 +26,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """ This server's configuration file is using the deprecated 'template_dir' setting in the 'sso' section. Support for this setting has been deprecated and will be removed in a future version of Synapse. 
Server admins should instead use the new -'custom_templates_directory' setting documented here: +'custom_template_directory' setting documented here: https://matrix-org.github.io/synapse/latest/templates.html ---------------------------------------------------------------------------------------""" -- cgit 1.4.1 From ba8938b090c7e1908cfa4feac75f08f3bc1183e8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 17 Aug 2022 11:17:04 +0100 Subject: Reject non-strict types in Pydantic models (#13502) --- .github/workflows/tests.yml | 14 +- changelog.d/13502.misc | 1 + scripts-dev/check_pydantic_models.py | 425 +++++++++++++++++++++++++++++++++++ scripts-dev/lint.sh | 1 + 4 files changed, 440 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13502.misc create mode 100755 scripts-dev/check_pydantic_models.py diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4bc29c8207..144cb9ffaa 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -53,10 +53,22 @@ jobs: env: PULL_REQUEST_NUMBER: ${{ github.event.number }} + lint-pydantic: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + - uses: matrix-org/setup-python-poetry@v1 + with: + extras: "all" + - run: poetry run scripts-dev/check_pydantic_models.py + # Dummy step to gate other tests on without repeating the whole list linting-done: if: ${{ !cancelled() }} # Run this even if prior jobs were skipped - needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta] + needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta] runs-on: ubuntu-latest steps: - run: "true" diff --git a/changelog.d/13502.misc b/changelog.d/13502.misc new file mode 100644 index 0000000000..ed6832996e --- /dev/null +++ b/changelog.d/13502.misc @@ -0,0 +1 @@ +Add a linter script which will reject non-strict types in Pydantic models. diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py new file mode 100755 index 0000000000..d0fb811bdb --- /dev/null +++ b/scripts-dev/check_pydantic_models.py @@ -0,0 +1,425 @@ +#! /usr/bin/env python +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +A script which enforces that Synapse always uses strict types when defining a Pydantic +model. + +Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. See + + https://github.com/pydantic/pydantic/issues/1098 + https://pydantic-docs.helpmanual.io/blog/pydantic-v2/#strict-mode + +until then, this script is a best effort to stop us from introducing type coersion bugs +(like the infamous stringy power levels fixed in room version 10). 
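For example (an illustrative doctest, not part of the original script; pydantic v1
semantics): a plain `int` annotation silently coerces a stringy value, which is
exactly the class of bug strict types prevent.

    >>> from pydantic import BaseModel, StrictInt, ValidationError
    >>> class LaxModel(BaseModel):
    ...     power_level: int          # non-strict: coerces "50" to 50
    >>> LaxModel(power_level="50").power_level
    50
    >>> class StrictModel(BaseModel):
    ...     power_level: StrictInt    # strict: a str raises ValidationError
    >>> try:
    ...     StrictModel(power_level="50")
    ... except ValidationError:
    ...     print("rejected")
    rejected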
+""" +import argparse +import contextlib +import functools +import importlib +import logging +import os +import pkgutil +import sys +import textwrap +import traceback +import unittest.mock +from contextlib import contextmanager +from typing import Any, Callable, Dict, Generator, List, Set, Type, TypeVar + +from parameterized import parameterized +from pydantic import BaseModel as PydanticBaseModel, conbytes, confloat, conint, constr +from pydantic.typing import get_args +from typing_extensions import ParamSpec + +logger = logging.getLogger(__name__) + +CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [ + constr, + conbytes, + conint, + confloat, +] + +TYPES_THAT_PYDANTIC_WILL_COERCE_TO = [ + str, + bytes, + int, + float, + bool, +] + + +P = ParamSpec("P") +R = TypeVar("R") + + +class ModelCheckerException(Exception): + """Dummy exception. Allows us to detect unwanted types during a module import.""" + + +class MissingStrictInConstrainedTypeException(ModelCheckerException): + factory_name: str + + def __init__(self, factory_name: str): + self.factory_name = factory_name + + +class FieldHasUnwantedTypeException(ModelCheckerException): + message: str + + def __init__(self, message: str): + self.message = message + + +def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]: + """We patch `constr` and friends with wrappers that enforce strict=True.""" + + @functools.wraps(factory) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + # type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668 + if "strict" not in kwargs: # type: ignore[attr-defined] + raise MissingStrictInConstrainedTypeException(factory.__name__) + if not kwargs["strict"]: # type: ignore[index] + raise MissingStrictInConstrainedTypeException(factory.__name__) + return factory(*args, **kwargs) + + return wrapper + + +def field_type_unwanted(type_: Any) -> bool: + """Very rough attempt to detect if a type is unwanted as a Pydantic annotation. + + At present, we exclude types which will coerce, or any generic type involving types + which will coerce.""" + logger.debug("Is %s unwanted?") + if type_ in TYPES_THAT_PYDANTIC_WILL_COERCE_TO: + logger.debug("yes") + return True + logger.debug("Maybe. Subargs are %s", get_args(type_)) + rv = any(field_type_unwanted(t) for t in get_args(type_)) + logger.debug("Conclusion: %s %s unwanted", type_, "is" if rv else "is not") + return rv + + +class PatchedBaseModel(PydanticBaseModel): + """A patched version of BaseModel that inspects fields after models are defined. + + We complain loudly if we see an unwanted type. + + Beware: ModelField.type_ is presumably private; this is likely to be very brittle. + """ + + @classmethod + def __init_subclass__(cls: Type[PydanticBaseModel], **kwargs: object): + for field in cls.__fields__.values(): + # Note that field.type_ and field.outer_type are computed based on the + # annotation type, see pydantic.fields.ModelField._type_analysis + if field_type_unwanted(field.outer_type_): + # TODO: this only reports the first bad field. Can we find all bad ones + # and report them all? + raise FieldHasUnwantedTypeException( + f"{cls.__module__}.{cls.__qualname__} has field '{field.name}' " + f"with unwanted type `{field.outer_type_}`" + ) + + +@contextmanager +def monkeypatch_pydantic() -> Generator[None, None, None]: + """Patch pydantic with our snooping versions of BaseModel and the con* functions. + + If the snooping functions see something they don't like, they'll raise a + ModelCheckingException instance. 
+ """ + with contextlib.ExitStack() as patches: + # Most Synapse code ought to import the patched objects directly from + # `pydantic`. But we also patch their containing modules `pydantic.main` and + # `pydantic.types` for completeness. + patch_basemodel1 = unittest.mock.patch( + "pydantic.BaseModel", new=PatchedBaseModel + ) + patch_basemodel2 = unittest.mock.patch( + "pydantic.main.BaseModel", new=PatchedBaseModel + ) + patches.enter_context(patch_basemodel1) + patches.enter_context(patch_basemodel2) + for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: + wrapper: Callable = make_wrapper(factory) + patch1 = unittest.mock.patch(f"pydantic.{factory.__name__}", new=wrapper) + patch2 = unittest.mock.patch( + f"pydantic.types.{factory.__name__}", new=wrapper + ) + patches.enter_context(patch1) + patches.enter_context(patch2) + yield + + +def format_model_checker_exception(e: ModelCheckerException) -> str: + """Work out which line of code caused e. Format the line in a human-friendly way.""" + # TODO. FieldHasUnwantedTypeException gives better error messages. Can we ditch the + # patches of constr() etc, and instead inspect fields to look for ConstrainedStr + # with strict=False? There is some difficulty with the inheritance hierarchy + # because StrictStr < ConstrainedStr < str. + if isinstance(e, FieldHasUnwantedTypeException): + return e.message + elif isinstance(e, MissingStrictInConstrainedTypeException): + frame_summary = traceback.extract_tb(e.__traceback__)[-2] + return ( + f"Missing `strict=True` from {e.factory_name}() call \n" + + traceback.format_list([frame_summary])[0].lstrip() + ) + else: + raise ValueError(f"Unknown exception {e}") from e + + +def lint() -> int: + """Try to import all of Synapse and see if we spot any Pydantic type coercions. + + Print any problems, then return a status code suitable for sys.exit.""" + failures = do_lint() + if failures: + print(f"Found {len(failures)} problem(s)") + for failure in sorted(failures): + print(failure) + return os.EX_DATAERR if failures else os.EX_OK + + +def do_lint() -> Set[str]: + """Try to import all of Synapse and see if we spot any Pydantic type coercions.""" + failures = set() + + with monkeypatch_pydantic(): + logger.debug("Importing synapse") + try: + # TODO: make "synapse" an argument so we can target this script at + # a subpackage + module = importlib.import_module("synapse") + except ModelCheckerException as e: + logger.warning("Bad annotation found when importing synapse") + failures.add(format_model_checker_exception(e)) + return failures + + try: + logger.debug("Fetching subpackages") + module_infos = list( + pkgutil.walk_packages(module.__path__, f"{module.__name__}.") + ) + except ModelCheckerException as e: + logger.warning("Bad annotation found when looking for modules to import") + failures.add(format_model_checker_exception(e)) + return failures + + for module_info in module_infos: + logger.debug("Importing %s", module_info.name) + try: + importlib.import_module(module_info.name) + except ModelCheckerException as e: + logger.warning( + f"Bad annotation found when importing {module_info.name}" + ) + failures.add(format_model_checker_exception(e)) + + return failures + + +def run_test_snippet(source: str) -> None: + """Exec a snippet of source code in an isolated environment.""" + # To emulate `source` being called at the top level of the module, + # the globals and locals we provide apparently have to be the same mapping. 
+ # + # > Remember that at the module level, globals and locals are the same dictionary. + # > If exec gets two separate objects as globals and locals, the code will be + # > executed as if it were embedded in a class definition. + globals_: Dict[str, object] + locals_: Dict[str, object] + globals_ = locals_ = {} + exec(textwrap.dedent(source), globals_, locals_) + + +class TestConstrainedTypesPatch(unittest.TestCase): + def test_expression_without_strict_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic import constr + constr() + """ + ) + + def test_called_as_module_attribute_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + import pydantic + pydantic.constr() + """ + ) + + def test_wildcard_import_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic import * + constr() + """ + ) + + def test_alternative_import_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic.types import constr + constr() + """ + ) + + def test_alternative_import_attribute_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + import pydantic.types + pydantic.types.constr() + """ + ) + + def test_kwarg_but_no_strict_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic import constr + constr(min_length=10) + """ + ) + + def test_kwarg_strict_False_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic import constr + constr(strict=False) + """ + ) + + def test_kwarg_strict_True_doesnt_raise(self) -> None: + with monkeypatch_pydantic(): + run_test_snippet( + """ + from pydantic import constr + constr(strict=True) + """ + ) + + def test_annotation_without_strict_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic import constr + x: constr() + """ + ) + + def test_field_annotation_without_strict_raises(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic import BaseModel, conint + class C: + x: conint() + """ + ) + + +class TestFieldTypeInspection(unittest.TestCase): + @parameterized.expand( + [ + ("str",), + ("bytes"), + ("int",), + ("float",), + ("bool"), + ("Optional[str]",), + ("Union[None, str]",), + ("List[str]",), + ("List[List[str]]",), + ("Dict[StrictStr, str]",), + ("Dict[str, StrictStr]",), + ("TypedDict('D', x=int)",), + ] + ) + def test_field_holding_unwanted_type_raises(self, annotation: str) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + f""" + from typing import * + from pydantic import * + class C(BaseModel): + f: {annotation} + """ + ) + + @parameterized.expand( + [ + ("StrictStr",), + ("StrictBytes"), + ("StrictInt",), + ("StrictFloat",), + ("StrictBool"), + ("constr(strict=True, min_length=10)",), + ("Optional[StrictStr]",), + ("Union[None, StrictStr]",), + ("List[StrictStr]",), + ("List[List[StrictStr]]",), + ("Dict[StrictStr, StrictStr]",), + ("TypedDict('D', x=StrictInt)",), + ] + ) + def 
test_field_holding_accepted_type_doesnt_raise(self, annotation: str) -> None: + with monkeypatch_pydantic(): + run_test_snippet( + f""" + from typing import * + from pydantic import * + class C(BaseModel): + f: {annotation} + """ + ) + + def test_field_holding_str_raises_with_alternative_import(self) -> None: + with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): + run_test_snippet( + """ + from pydantic.main import BaseModel + class C(BaseModel): + f: str + """ + ) + + +parser = argparse.ArgumentParser() +parser.add_argument("mode", choices=["lint", "test"], default="lint", nargs="?") +parser.add_argument("-v", "--verbose", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args(sys.argv[1:]) + logging.basicConfig( + format="%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s", + level=logging.DEBUG if args.verbose else logging.INFO, + ) + # suppress logs we don't care about + logging.getLogger("xmlschema").setLevel(logging.WARNING) + if args.mode == "lint": + sys.exit(lint()) + elif args.mode == "test": + unittest.main(argv=sys.argv[:1]) diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 377348b107..bf900645b1 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -106,4 +106,5 @@ isort "${files[@]}" python3 -m black "${files[@]}" ./scripts-dev/config-lint.sh flake8 "${files[@]}" +./scripts-dev/check_pydantic_models.py lint mypy -- cgit 1.4.1 From 436e0eb39a50c0d83b8573ae92ee84f87f21fbd3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Aug 2022 12:02:38 +0100 Subject: Fix breaking event sending due to bad push rule (#13547) Broken by #13522. It looks like we have some rules in the DB with a priority class less than 0 that don't override the base rules. Before these were just dropped, but #13522 made that a hard error. --- changelog.d/13547.misc | 1 + synapse/push/baserules.py | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13547.misc diff --git a/changelog.d/13547.misc b/changelog.d/13547.misc new file mode 100644 index 0000000000..0a8827205d --- /dev/null +++ b/changelog.d/13547.misc @@ -0,0 +1 @@ +Improve performance of sending messages in rooms with thousands of local users. diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index c3e072033c..440205e80c 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -49,6 +49,7 @@ kind, etc, etc.
""" import itertools +import logging from typing import Dict, Iterator, List, Mapping, Sequence, Tuple, Union import attr @@ -56,6 +57,8 @@ import attr from synapse.config.experimental import ExperimentalConfig from synapse.push.rulekinds import PRIORITY_CLASS_MAP +logger = logging.getLogger(__name__) + @attr.s(auto_attribs=True, slots=True, frozen=True) class PushRule: @@ -199,8 +202,16 @@ def compile_push_rules(rawrules: List[PushRule]) -> PushRules: collection = rules.sender elif rule.priority_class == 1: collection = rules.underride + elif rule.priority_class <= 0: + logger.info( + "Got rule with priority class less than zero, but doesn't override a base rule: %s", + rule, + ) + continue else: - raise Exception(f"Unknown priority class: {rule.priority_class}") + # We log and continue here so as not to break event sending + logger.error("Unknown priority class: %", rule.priority_class) + continue collection.append(rule) -- cgit 1.4.1 From 82a0752f3280ebe5631cafc3b4b34198ff9fc86b Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 17 Aug 2022 16:46:09 +0100 Subject: A first pass at pruning the Synapse README (#13491) --- README.rst | 450 ++++++++------------------------- changelog.d/13491.doc | 1 + docs/usage/administration/admin_faq.md | 90 ++++++- 3 files changed, 194 insertions(+), 347 deletions(-) create mode 100644 changelog.d/13491.doc diff --git a/README.rst b/README.rst index 219e32de8e..84e5310309 100644 --- a/README.rst +++ b/README.rst @@ -2,107 +2,111 @@ Synapse |support| |development| |documentation| |license| |pypi| |python| ========================================================================= +Synapse is an open-source `Matrix `_ homeserver written and +maintained by the Matrix.org Foundation. We began rapid development began in 2014, +reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues +in earnest today. + +Briefly, Matrix is an open standard for communications on the internet, supporting +federation, encryption and VoIP. Matrix.org has more to say about the `goals of the +Matrix project `_, and the `formal specification +`_ describes the technical details. + .. contents:: -Introduction -============ +Installing and configuration +============================ -Matrix is an ambitious new ecosystem for open federated Instant Messaging and -VoIP. The basics you need to know to get up and running are: +The Synapse documentation describes `how to install Synapse `_. We recommend using +`Docker images `_ or `Debian packages from Matrix.org +`_. -- Everything in Matrix happens in a room. Rooms are distributed and do not - exist on any single server. Rooms can be located using convenience aliases - like ``#matrix:matrix.org`` or ``#test:localhost:8448``. +.. _federation: -- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future - you will normally refer to yourself and others using a third party identifier - (3PID): email address, phone number, etc rather than manipulating Matrix user IDs) +Synapse has a variety of `config options +`_ +which can be used to customise its behaviour after installation. +There are additional details on how to `configure Synapse for federation here +`_. -The overall architecture is:: +.. 
_reverse-proxy: - client <----> homeserver <=====================> homeserver <----> client - https://somewhere.org/_matrix https://elsewhere.net/_matrix +Using a reverse proxy with Synapse +---------------------------------- -``#matrix:matrix.org`` is the official support room for Matrix, and can be -accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or -via IRC bridge at irc://irc.libera.chat/matrix. +It is recommended to put a reverse proxy such as +`nginx `_, +`Apache `_, +`Caddy `_, +`HAProxy `_ or +`relayd `_ in front of Synapse. One advantage of +doing so is that it means that you can expose the default https port (443) to +Matrix clients without needing to run Synapse with root privileges. +For information on configuring one, see `the reverse proxy docs +`_. -Synapse is currently in rapid development, but as of version 0.5 we believe it -is sufficiently stable to be run as an internet-facing service for real usage! +Upgrading an existing Synapse +----------------------------- -About Matrix -============ +The instructions for upgrading Synapse are in `the upgrade notes`_. +Please check these instructions as upgrading may require extra steps for some +versions of Synapse. -Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard, -which handle: +.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html -- Creating and managing fully distributed chat rooms with no - single points of control or failure -- Eventually-consistent cryptographically secure synchronisation of room - state across a global open network of federated servers and services -- Sending and receiving extensible messages in a room with (optional) - end-to-end encryption -- Inviting, joining, leaving, kicking, banning room members -- Managing user accounts (registration, login, logout) -- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers, - Facebook accounts to authenticate, identify and discover users on Matrix. -- Placing 1:1 VoIP and Video calls -These APIs are intended to be implemented on a wide range of servers, services -and clients, letting developers build messaging and VoIP functionality on top -of the entirely open Matrix ecosystem rather than using closed or proprietary -solutions. The hope is for Matrix to act as the building blocks for a new -generation of fully open and interoperable messaging and VoIP apps for the -internet. +Platform dependencies +--------------------- -Synapse is a Matrix "homeserver" implementation developed by the matrix.org core -team, written in Python 3/Twisted. +Synapse uses a number of platform dependencies such as Python and PostgreSQL, +and aims to follow supported upstream versions. See the +`deprecation policy `_ +for more details. -In Matrix, every user runs one or more Matrix clients, which connect through to -a Matrix homeserver. The homeserver stores all their personal chat history and -user account information - much as a mail client connects through to an -IMAP/SMTP server. Just like email, you can either run your own Matrix -homeserver and control and own your own communications and history or use one -hosted by someone else (e.g. matrix.org) - there is no single point of control -or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, -etc. 
-We'd like to invite you to join #matrix:matrix.org (via -https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look -at the `Matrix spec `_, and experiment with the -`APIs `_ and `Client SDKs -`_. +Security note +------------- -Thanks for using Matrix! +Matrix serves raw, user-supplied data in some APIs -- specifically the `content +repository endpoints`_. -Support -======= +.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid -For support installing or managing Synapse, please join |room|_ (from a matrix.org -account if necessary) and ask questions there. We do not use GitHub issues for -support requests, only for bug reports and feature requests. +Whilst we make a reasonable effort to mitigate against XSS attacks (for +instance, by using `CSP`_), a Matrix homeserver should not be hosted on a +domain hosting other web applications. This especially applies to sharing +the domain with Matrix web clients and other sensitive applications like +webmail. See +https://developer.github.com/changes/2014-04-25-user-content-security for more +information. -Synapse's documentation is `nicely rendered on GitHub Pages `_, -with its source available in |docs|_. +.. _CSP: https://github.com/matrix-org/synapse/pull/1021 -.. |room| replace:: ``#synapse:matrix.org`` -.. _room: https://matrix.to/#/#synapse:matrix.org +Ideally, the homeserver should not simply be on a different subdomain, but on +a completely different `registered domain`_ (also known as top-level site or +eTLD+1). This is because `some attacks`_ are still possible as long as the two +applications share the same registered domain. -.. |docs| replace:: ``docs`` -.. _docs: docs +.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3 -Synapse Installation -==================== +.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie -.. _federation: +To illustrate this with an example, if your Element Web or other sensitive web +application is hosted on ``A.example1.com``, you should ideally host Synapse on +``example2.com``. Some amount of protection is offered by hosting on +``B.example1.com`` instead, so this is also acceptable in some scenarios. +However, you should *not* host your Synapse on ``A.example1.com``. + +Note that all of the above refers exclusively to the domain used in Synapse's +``public_baseurl`` setting. In particular, it has no bearing on the domain +mentioned in MXIDs hosted on that server. -* For details on how to install synapse, see - `Installation Instructions `_. -* For specific details on how to configure Synapse for federation see `docs/federate.md `_ +Following this advice ensures that even if an XSS is found in Synapse, the +impact to other applications will be minimal. -Connecting to Synapse from a client -=================================== +Testing a new installation +========================== The easiest way to try out your new Synapse installation is by connecting to it from a web client. @@ -129,11 +133,20 @@ Registering a new user from a client ------------------------------------ By default, registration of new users via Matrix clients is disabled. To enable -it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then -recommended to also set up CAPTCHA - see ``_.) +it: + +1. In the + `registration config section `_ + set ``enable_registration: true`` in ``homeserver.yaml``. +2. 
Then **either**: -Once ``enable_registration`` is set to ``true``, it is possible to register a -user via a Matrix client. + a. set up a `CAPTCHA `_, or + b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``. + +We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to +the public internet. Without it, anyone can freely register accounts on your homeserver. +This can be exploited by attackers to create spambots targeting the rest of the Matrix +federation. Your new user name will be formed partly from the ``server_name``, and partly from a localpart you specify when you create the account. Your name will take @@ -146,71 +159,22 @@ the form of:: As when logging in, you will need to specify a "Custom server". Specify your desired ``localpart`` in the 'User name' box. -Security note -============= +Troubleshooting and support +=========================== -Matrix serves raw, user-supplied data in some APIs -- specifically the `content -repository endpoints`_. +The `Admin FAQ `_ +includes tips on dealing with some common problems. For more details, see +`Synapse's wider documentation `_. -.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid - -Whilst we make a reasonable effort to mitigate against XSS attacks (for -instance, by using `CSP`_), a Matrix homeserver should not be hosted on a -domain hosting other web applications. This especially applies to sharing -the domain with Matrix web clients and other sensitive applications like -webmail. See -https://developer.github.com/changes/2014-04-25-user-content-security for more -information. - -.. _CSP: https://github.com/matrix-org/synapse/pull/1021 - -Ideally, the homeserver should not simply be on a different subdomain, but on -a completely different `registered domain`_ (also known as top-level site or -eTLD+1). This is because `some attacks`_ are still possible as long as the two -applications share the same registered domain. - -.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3 - -.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie +For additional support installing or managing Synapse, please ask in the community +support room |room|_ (from a matrix.org account if necessary). We do not use GitHub +issues for support requests, only for bug reports and feature requests. -To illustrate this with an example, if your Element Web or other sensitive web -application is hosted on ``A.example1.com``, you should ideally host Synapse on -``example2.com``. Some amount of protection is offered by hosting on -``B.example1.com`` instead, so this is also acceptable in some scenarios. -However, you should *not* host your Synapse on ``A.example1.com``. - -Note that all of the above refers exclusively to the domain used in Synapse's -``public_baseurl`` setting. In particular, it has no bearing on the domain -mentioned in MXIDs hosted on that server. - -Following this advice ensures that even if an XSS is found in Synapse, the -impact to other applications will be minimal. - - -Upgrading an existing Synapse -============================= - -The instructions for upgrading synapse are in `the upgrade notes`_. -Please check these instructions as upgrading may require extra steps for some -versions of synapse. - -.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html - -..
_reverse-proxy: - -Using a reverse proxy with Synapse -================================== - -It is recommended to put a reverse proxy such as -`nginx `_, -`Apache `_, -`Caddy `_, -`HAProxy `_ or -`relayd `_ in front of Synapse. One advantage of -doing so is that it means that you can expose the default https port (443) to -Matrix clients without needing to run Synapse with root privileges. +.. |room| replace:: ``#synapse:matrix.org`` +.. _room: https://matrix.to/#/#synapse:matrix.org -For information on configuring one, see ``_. +.. |docs| replace:: ``docs`` +.. _docs: docs Identity Servers ================ @@ -242,34 +206,15 @@ an email address with your account, or send an invite to another user via their email address. -Password reset -============== - -Users can reset their password through their client. Alternatively, a server admin -can reset a users password using the `admin API `_ -or by directly editing the database as shown below. - -First calculate the hash of the new password:: - - $ ~/synapse/env/bin/hash_password - Password: - Confirm password: - $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - -Then update the ``users`` table in the database:: - - UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' - WHERE name='@test:test.com'; - - -Synapse Development -=================== +Development +=========== +We welcome contributions to Synapse from the community! The best place to get started is our `guide for contributors `_. This is part of our larger `documentation `_, which includes -information for synapse developers as well as synapse administrators. +information for Synapse developers as well as Synapse administrators. Developers might be particularly interested in: * `Synapse's database schema `_, @@ -280,187 +225,6 @@ Alongside all that, join our developer community on Matrix: `#synapse-dev:matrix.org `_, featuring real humans! -Quick start ------------ - -Before setting up a development environment for synapse, make sure you have the -system dependencies (such as the python header files) installed - see -`Platform-specific prerequisites `_. - -To check out a synapse for development, clone the git repo into a working -directory of your choice:: - - git clone https://github.com/matrix-org/synapse.git - cd synapse - -Synapse has a number of external dependencies. We maintain a fixed development -environment using `Poetry `_. First, install poetry. We recommend:: - - pip install --user pipx - pipx install poetry - -as described `here `_. -(See `poetry's installation docs `_ -for other installation methods.) Then ask poetry to create a virtual environment -from the project and install Synapse's dependencies:: - - poetry install --extras "all test" - -This will run a process of downloading and installing all the needed -dependencies into a virtual env. - -We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`:: - - poetry run ./demo/start.sh - -(to stop, you can use ``poetry run ./demo/stop.sh``) - -See the `demo documentation `_ -for more information. 
- -If you just want to start a single instance of the app and run it directly:: - - # Create the homeserver.yaml config once - poetry run synapse_homeserver \ - --server-name my.domain.name \ - --config-path homeserver.yaml \ - --generate-config \ - --report-stats=[yes|no] - - # Start the app - poetry run synapse_homeserver --config-path homeserver.yaml - - -Running the unit tests ----------------------- - -After getting up and running, you may wish to run Synapse's unit tests to -check that everything is installed correctly:: - - poetry run trial tests - -This should end with a 'PASSED' result (note that exact numbers will -differ):: - - Ran 1337 tests in 716.064s - - PASSED (skips=15, successes=1322) - -For more tips on running the unit tests, like running a specific test or -to see the logging output, see the `CONTRIBUTING doc `_. - - -Running the Integration Tests ------------------------------ - -Synapse is accompanied by `SyTest `_, -a Matrix homeserver integration testing suite, which uses HTTP requests to -access the API as a Matrix client would. It is able to run Synapse directly from -the source tree, so installation of the server is not required. - -Testing with SyTest is recommended for verifying that changes related to the -Client-Server API are functioning correctly. See the `SyTest installation -instructions `_ for details. - - -Platform dependencies -===================== - -Synapse uses a number of platform dependencies such as Python and PostgreSQL, -and aims to follow supported upstream versions. See the -``_ document for more details. - - -Troubleshooting -=============== - -Need help? Join our community support room on Matrix: -`#synapse:matrix.org `_ - -Running out of File Handles ---------------------------- - -If synapse runs out of file handles, it typically fails badly - live-locking -at 100% CPU, and/or failing to accept new TCP connections (blocking the -connecting client). Matrix currently can legitimately use a lot of file handles, -thanks to busy rooms like #matrix:matrix.org containing hundreds of participating -servers. The first time a server talks in a room it will try to connect -simultaneously to all participating servers, which could exhaust the available -file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow -to respond. (We need to improve the routing algorithm used to be better than -full mesh, but as of March 2019 this hasn't happened yet). - -If you hit this failure mode, we recommend increasing the maximum number of -open file handles to be at least 4096 (assuming a default of 1024 or 256). -This is typically done by editing ``/etc/security/limits.conf`` - -Separately, Synapse may leak file handles if inbound HTTP requests get stuck -during processing - e.g. blocked behind a lock or talking to a remote server etc. -This is best diagnosed by matching up the 'Received request' and 'Processed request' -log lines and looking for any 'Processed request' lines which take more than -a few seconds to execute. Please let us know at #synapse:matrix.org if -you see this failure mode so we can help debug it, however. - -Help!! Synapse is slow and eats all my RAM/CPU! ------------------------------------------------ - -First, ensure you are running the latest version of Synapse, using Python 3 -with a PostgreSQL database. - -Synapse's architecture is quite RAM hungry currently - we deliberately -cache a lot of recent room data and metadata in RAM in order to speed up -common requests. 
We'll improve this in the future, but for now the easiest -way to either reduce the RAM usage (at the risk of slowing things down) -is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment -variable. The default is 0.5, which can be decreased to reduce RAM usage -in memory constrained enviroments, or increased if performance starts to -degrade. - -However, degraded performance due to a low cache factor, common on -machines with slow disks, often leads to explosions in memory use due -backlogged requests. In this case, reducing the cache factor will make -things worse. Instead, try increasing it drastically. 2.0 is a good -starting value. - -Using `libjemalloc `_ can also yield a significant -improvement in overall memory use, and especially in terms of giving back -RAM to the OS. To use it, the library must simply be put in the -LD_PRELOAD environment variable when launching Synapse. On Debian, this -can be done by installing the ``libjemalloc1`` package and adding this -line to ``/etc/default/matrix-synapse``:: - - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 - -This can make a significant difference on Python 2.7 - it's unclear how -much of an improvement it provides on Python 3.x. - -If you're encountering high CPU use by the Synapse process itself, you -may be affected by a bug with presence tracking that leads to a -massive excess of outgoing federation requests (see `discussion -`_). If metrics -indicate that your server is also issuing far more outgoing federation -requests than can be accounted for by your users' activity, this is a -likely cause. The misbehavior can be worked around by setting -the following in the Synapse config file: - -.. code-block:: yaml - - presence: - enabled: false - -People can't accept room invitations from me --------------------------------------------- - -The typical failure mode here is that you send an invitation to someone -to join a room or direct chat, but when they go to accept it, they get an -error (typically along the lines of "Invalid signature"). They might see -something like the following in their logs:: - - 2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server with key ed25519:a_EqML: Unable to verify signature for - -This is normally caused by a misconfiguration in your reverse-proxy. See -``_ and double-check that your settings are correct. - .. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix :alt: (get support on #synapse:matrix.org) :target: https://matrix.to/#/#synapse:matrix.org diff --git a/changelog.d/13491.doc b/changelog.d/13491.doc new file mode 100644 index 0000000000..026f735549 --- /dev/null +++ b/changelog.d/13491.doc @@ -0,0 +1 @@ +Tidy up Synapse's README. diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 3dcad4bbef..7ba5a83f04 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -2,9 +2,9 @@ How do I become a server admin? --- -If your server already has an admin account you should use the user admin API to promote other accounts to become admins. 
See [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not) +If your server already has an admin account you should use the [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not) to promote other accounts to become admins. -If you don't have any admin accounts yet you won't be able to use the admin API so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account, use the admin APIs to make further changes. +If you don't have any admin accounts yet you won't be able to use the admin API, so you'll have to edit the database manually. Manually editing the database is generally not recommended, so once you have an admin account, use the admin APIs to make further changes. ```sql UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` @@ -32,9 +32,11 @@ What users are registered on my server? SELECT NAME from users; ``` -Manually resetting passwords: --- -See https://github.com/matrix-org/synapse/blob/master/README.rst#password-reset +Manually resetting passwords --- +Users can reset their password through their client. Alternatively, a server admin +can reset a user's password using the [admin API](../../admin_api/user_admin_api.md#reset-password). + I have a problem with my server. Can I just delete my database and start again? --- @@ -101,3 +103,83 @@ LIMIT 10; You can also use the [List Room API](../../admin_api/rooms.md#list-room-api) and `order_by` `state_events`. + + +People can't accept room invitations from me +--- + +The typical failure mode here is that you send an invitation to someone +to join a room or direct chat, but when they go to accept it, they get an +error (typically along the lines of "Invalid signature"). They might see +something like the following in their logs: + + 2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server with key ed25519:a_EqML: Unable to verify signature for + +This is normally caused by a misconfiguration in your reverse-proxy. See [the reverse proxy docs](../../reverse_proxy.md) and double-check that your settings are correct. + + +Help!! Synapse is slow and eats all my RAM/CPU! +----------------------------------------------- + +First, ensure you are running the latest version of Synapse, using Python 3 +with a [PostgreSQL database](../../postgres.md). + +Synapse's architecture is quite RAM hungry currently - we deliberately +cache a lot of recent room data and metadata in RAM in order to speed up +common requests. We'll improve this in the future, but for now the easiest +way to reduce the RAM usage (at the risk of slowing things down) +is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment +variable. The default is 0.5, which can be decreased to reduce RAM usage +in memory constrained environments, or increased if performance starts to +degrade. + +However, degraded performance due to a low cache factor, common on +machines with slow disks, often leads to explosions in memory use due to +backlogged requests. In this case, reducing the cache factor will make +things worse. Instead, try increasing it drastically. 2.0 is a good +starting value. + +Using [libjemalloc](https://jemalloc.net) can also yield a significant +improvement in overall memory use, and especially in terms of giving back +RAM to the OS.
To use it, the library must simply be put in the +LD_PRELOAD environment variable when launching Synapse. On Debian, this +can be done by installing the `libjemalloc1` package and adding this +line to `/etc/default/matrix-synapse`: + + LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 + +This made a significant difference on Python 2.7 - it's unclear how +much of an improvement it provides on Python 3.x. + +If you're encountering high CPU use by the Synapse process itself, you +may be affected by a bug with presence tracking that leads to a +massive excess of outgoing federation requests (see [discussion](https://github.com/matrix-org/synapse/issues/3971)). If metrics +indicate that your server is also issuing far more outgoing federation +requests than can be accounted for by your users' activity, this is a +likely cause. The misbehavior can be worked around by disabling presence +in the Synapse config file: [see here](../configuration/config_documentation.md#presence). + + +Running out of File Handles +--------------------------- + +If Synapse runs out of file handles, it typically fails badly - live-locking +at 100% CPU, and/or failing to accept new TCP connections (blocking the +connecting client). Matrix currently can legitimately use a lot of file handles, +thanks to busy rooms like `#matrix:matrix.org` containing hundreds of participating +servers. The first time a server talks in a room it will try to connect +simultaneously to all participating servers, which could exhaust the available +file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow +to respond. (We need to improve the routing algorithm used to be better than +full mesh, but as of March 2019 this hasn't happened yet). + +If you hit this failure mode, we recommend increasing the maximum number of +open file handles to be at least 4096 (assuming a default of 1024 or 256). +This is typically done by editing ``/etc/security/limits.conf`` + +Separately, Synapse may leak file handles if inbound HTTP requests get stuck +during processing - e.g. blocked behind a lock or talking to a remote server etc. +This is best diagnosed by matching up the 'Received request' and 'Processed request' +log lines and looking for any 'Processed request' lines which take more than +a few seconds to execute. Please let us know at [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org) if +you see this failure mode so we can help debug it. -- cgit 1.4.1 From 8bdf2bd31ef003f0e89a588d8977d4f689ef6856 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 17 Aug 2022 18:08:23 +0000 Subject: Fix a bug in the `/event_reports` Admin API which meant that the total count could be larger than the number of results you can actually query for. (#13525) Co-authored-by: Brendan Abolivier --- changelog.d/13525.bugfix | 1 + synapse/storage/databases/main/room.py | 6 ++++++ tests/rest/admin/test_event_reports.py | 27 +++++++++++++++++++++++++++ 3 files changed, 34 insertions(+) create mode 100644 changelog.d/13525.bugfix diff --git a/changelog.d/13525.bugfix b/changelog.d/13525.bugfix new file mode 100644 index 0000000000..dbd1adbc88 --- /dev/null +++ b/changelog.d/13525.bugfix @@ -0,0 +1 @@ +Fix a bug in the `/event_reports` Admin API which meant that the total count could be larger than the number of results you can actually query for.
\ No newline at end of file diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 0f1f0d11ea..b7d4baa6bb 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -2001,9 +2001,15 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" + # We join on room_stats_state despite not using any columns from it + # because the join can influence the number of rows returned; + # e.g. a room that doesn't have state, maybe because it was deleted. + # The query returning the total count should be consistent with + # the query returning the results. sql = """ SELECT COUNT(*) as total_event_reports FROM event_reports AS er + JOIN room_stats_state ON room_stats_state.room_id = er.room_id {} """.format( where_clause diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index fbc490f46d..8a4e5c3f77 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -410,6 +410,33 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.assertIn("score", c) self.assertIn("reason", c) + def test_count_correct_despite_table_deletions(self) -> None: + """ + Tests that the count matches the number of rows, even if rows in joined tables + are missing. + """ + + # Delete rows from room_stats_state for one of our rooms. + self.get_success( + self.hs.get_datastores().main.db_pool.simple_delete( + "room_stats_state", {"room_id": self.room_id1}, desc="_" + ) + ) + + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + # The 'total' field is 10 because only 10 reports will actually + # be retrievable since we deleted the rows in the room_stats_state + # table. + self.assertEqual(channel.json_body["total"], 10) + # This is consistent with the number of rows actually returned. + self.assertEqual(len(channel.json_body["event_reports"]), 10) + class EventReportDetailTestCase(unittest.HomeserverTestCase): servlets = [ -- cgit 1.4.1 From 49d04e43dfa0551f52c1f1872b6f311efa756ca8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 Aug 2022 16:10:07 -0500 Subject: Add metrics to track how the rate limiter is affecting requests (sleep/reject) (#13534) Related to https://github.com/matrix-org/synapse/pull/13499 Part of https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/13534.misc | 1 + synapse/util/ratelimitutils.py | 37 +++++++++++++++++++++++++++++-------- 2 files changed, 30 insertions(+), 8 deletions(-) create mode 100644 changelog.d/13534.misc diff --git a/changelog.d/13534.misc b/changelog.d/13534.misc new file mode 100644 index 0000000000..b488bf74c3 --- /dev/null +++ b/changelog.d/13534.misc @@ -0,0 +1 @@ +Add metrics to track how the rate limiter is affecting requests (sleep/reject). 
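The diff below threads the originating `host` through to the per-host rate limiter and bumps one of two module-level `prometheus_client` counters whenever a request is made to sleep or is rejected. As a minimal, standalone sketch of that pattern (illustrative metric name; the real counters are `synapse_rate_limit_sleep` and `synapse_rate_limit_reject` in `synapse/util/ratelimitutils.py`):

    from prometheus_client import Counter, generate_latest

    # Counters only ever increase; prometheus_client appends a `_total`
    # suffix to the metric name when it is scraped.
    sleep_counter = Counter("example_rate_limit_sleep", "Requests made to wait")

    def throttle_request() -> None:
        # Record each time the limiter decides to delay a request.
        sleep_counter.inc()

    throttle_request()
    print(generate_latest().decode())  # contains: example_rate_limit_sleep_total 1.0

Once scraped, the new counters can be graphed with a query such as `rate(synapse_rate_limit_sleep_total[5m])`, assuming the default exposition format.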
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index e48324d926..434b02b97b 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,6 +18,8 @@ import logging import typing from typing import Any, DefaultDict, Iterator, List, Set +from prometheus_client.core import Counter + from twisted.internet import defer from synapse.api.errors import LimitExceededError @@ -37,6 +39,9 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) +# Track how much the ratelimiter is affecting requests +rate_limit_sleep_counter = Counter("synapse_rate_limit_sleep", "") +rate_limit_reject_counter = Counter("synapse_rate_limit_reject", "") queue_wait_timer = Histogram( "synapse_rate_limit_queue_wait_time_seconds", "sec", @@ -84,7 +89,7 @@ class FederationRateLimiter: Returns: context manager which returns a deferred. """ - return self.ratelimiters[host].ratelimit() + return self.ratelimiters[host].ratelimit(host) class _PerHostRatelimiter: @@ -119,12 +124,14 @@ class _PerHostRatelimiter: self.request_times: List[int] = [] @contextlib.contextmanager - def ratelimit(self) -> "Iterator[defer.Deferred[None]]": + def ratelimit(self, host: str) -> "Iterator[defer.Deferred[None]]": # `contextlib.contextmanager` takes a generator and turns it into a # context manager. The generator should only yield once with a value # to be returned by manager. # Exceptions will be reraised at the yield. + self.host = host + request_id = object() ret = self._on_enter(request_id) try: @@ -144,6 +151,8 @@ class _PerHostRatelimiter: # sleeping or in the ready queue). queue_size = len(self.ready_request_queue) + len(self.sleeping_requests) if queue_size > self.reject_limit: + logger.debug("Ratelimiter(%s): rejecting request", self.host) + rate_limit_reject_counter.inc() raise LimitExceededError( retry_after_ms=int(self.window_size / self.sleep_limit) ) @@ -155,7 +164,8 @@ class _PerHostRatelimiter: queue_defer: defer.Deferred[None] = defer.Deferred() self.ready_request_queue[request_id] = queue_defer logger.info( - "Ratelimiter: queueing request (queue now %i items)", + "Ratelimiter(%s): queueing request (queue now %i items)", + self.host, len(self.ready_request_queue), ) @@ -164,19 +174,28 @@ class _PerHostRatelimiter: return defer.succeed(None) logger.debug( - "Ratelimit [%s]: len(self.request_times)=%d", + "Ratelimit(%s) [%s]: len(self.request_times)=%d", + self.host, id(request_id), len(self.request_times), ) if len(self.request_times) > self.sleep_limit: - logger.debug("Ratelimiter: sleeping request for %f sec", self.sleep_sec) + logger.debug( + "Ratelimiter(%s) [%s]: sleeping request for %f sec", + self.host, + id(request_id), + self.sleep_sec, + ) + rate_limit_sleep_counter.inc() ret_defer = run_in_background(self.clock.sleep, self.sleep_sec) self.sleeping_requests.add(request_id) def on_wait_finished(_: Any) -> "defer.Deferred[None]": - logger.debug("Ratelimit [%s]: Finished sleeping", id(request_id)) + logger.debug( + "Ratelimit(%s) [%s]: Finished sleeping", self.host, id(request_id) + ) self.sleeping_requests.discard(request_id) queue_defer = queue_request() return queue_defer @@ -186,7 +205,9 @@ class _PerHostRatelimiter: ret_defer = queue_request() def on_start(r: object) -> object: - logger.debug("Ratelimit [%s]: Processing req", id(request_id)) + logger.debug( + "Ratelimit(%s) [%s]: Processing req", self.host, id(request_id) + ) self.current_processing.add(request_id) return r @@ -217,7 +238,7 @@ class _PerHostRatelimiter: return 
make_deferred_yieldable(ret_defer) def _on_exit(self, request_id: object) -> None: - logger.debug("Ratelimit [%s]: Processed req", id(request_id)) + logger.debug("Ratelimit(%s) [%s]: Processed req", self.host, id(request_id)) self.current_processing.discard(request_id) try: # start processing the next item on the queue. -- cgit 1.4.1 From 84169a82dcf7dfb6eb7d307ea7f5e33cb57f6e3f Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 18 Aug 2022 11:53:02 +0100 Subject: Avoid blocking lazy-loading `/sync`s during partial joins (#13477) Use a state filter or accept partial state in a few places where we request state, to avoid blocking. To make lazy-loading `/sync`s work, we need to provide the memberships of event senders, which are not guaranteed to be in the room state. Instead we dig through auth events for memberships to present to clients. The auth events of an event are guaranteed to contain a passable membership event, otherwise the event would have been rejected. Note that this only covers the common code paths encountered during testing. There has been no exhaustive checking of all sync code paths. Fixes #13146. Signed-off-by: Sean Quah --- changelog.d/13477.misc | 1 + synapse/handlers/sync.py | 253 ++++++++++++++++++++++++++++++----- synapse/storage/controllers/state.py | 24 +++- 3 files changed, 244 insertions(+), 34 deletions(-) create mode 100644 changelog.d/13477.misc diff --git a/changelog.d/13477.misc b/changelog.d/13477.misc new file mode 100644 index 0000000000..5d21ae9d7a --- /dev/null +++ b/changelog.d/13477.misc @@ -0,0 +1 @@ +Faster room joins: Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3ca01391c9..b4d3f3958c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -16,9 +16,11 @@ import logging from typing import ( TYPE_CHECKING, Any, + Collection, Dict, FrozenSet, List, + Mapping, Optional, Sequence, Set, @@ -517,10 +519,17 @@ class SyncHandler: # ensure that we always include current state in the timeline current_state_ids: FrozenSet[str] = frozenset() if any(e.is_state() for e in recents): + # FIXME(faster_joins): We use the partial state here as + # we don't want to block `/sync` on finishing a lazy join. + # Which should be fine once + # https://github.com/matrix-org/synapse/issues/12989 is resolved, + # since we shouldn't reach here anymore? + # Note that we use the current state as a whitelist for filtering + # `recents`, so partial state is only a problem when a membership + # event turns up in `recents` but has not made it into the current + # state. current_state_ids_map = ( - await self._state_storage_controller.get_current_state_ids( - room_id - ) + await self.store.get_partial_current_state_ids(room_id) ) current_state_ids = frozenset(current_state_ids_map.values()) @@ -589,7 +598,13 @@ class SyncHandler: if any(e.is_state() for e in loaded_recents): # FIXME(faster_joins): We use the partial state here as # we don't want to block `/sync` on finishing a lazy join. - # Is this the correct way of doing it? + # Which should be fine once + # https://github.com/matrix-org/synapse/issues/12989 is resolved, + # since we shouldn't reach here anymore? 
+ # Note that we use the current state as a whitelist for filtering + # `loaded_recents`, so partial state is only a problem when a + # membership event turns up in `loaded_recents` but has not made it + # into the current state. current_state_ids_map = ( await self.store.get_partial_current_state_ids(room_id) ) @@ -637,7 +652,10 @@ class SyncHandler: ) async def get_state_after_event( - self, event_id: str, state_filter: Optional[StateFilter] = None + self, + event_id: str, + state_filter: Optional[StateFilter] = None, + await_full_state: bool = True, ) -> StateMap[str]: """ Get the room state after the given event @@ -645,9 +663,14 @@ class SyncHandler: Args: event_id: event of interest state_filter: The state filter used to fetch state from the database. + await_full_state: if `True`, will block if we do not yet have complete state + at the event and `state_filter` is not satisfied by partial state. + Defaults to `True`. """ state_ids = await self._state_storage_controller.get_state_ids_for_event( - event_id, state_filter=state_filter or StateFilter.all() + event_id, + state_filter=state_filter or StateFilter.all(), + await_full_state=await_full_state, ) # using get_metadata_for_events here (instead of get_event) sidesteps an issue @@ -670,6 +693,7 @@ class SyncHandler: room_id: str, stream_position: StreamToken, state_filter: Optional[StateFilter] = None, + await_full_state: bool = True, ) -> StateMap[str]: """Get the room state at a particular stream position @@ -677,6 +701,9 @@ class SyncHandler: room_id: room for which to get state stream_position: point at which to get state state_filter: The state filter used to fetch state from the database. + await_full_state: if `True`, will block if we do not yet have complete state + at the last event in the room before `stream_position` and + `state_filter` is not satisfied by partial state. Defaults to `True`. """ # FIXME: This gets the state at the latest event before the stream ordering, # which might not be the same as the "current state" of the room at the time @@ -688,7 +715,9 @@ class SyncHandler: if last_event_id: state = await self.get_state_after_event( - last_event_id, state_filter=state_filter or StateFilter.all() + last_event_id, + state_filter=state_filter or StateFilter.all(), + await_full_state=await_full_state, ) else: @@ -891,7 +920,15 @@ class SyncHandler: with Measure(self.clock, "compute_state_delta"): # The memberships needed for events in the timeline. # Only calculated when `lazy_load_members` is on. - members_to_fetch = None + members_to_fetch: Optional[Set[str]] = None + + # A dictionary mapping user IDs to the first event in the timeline sent by + # them. Only calculated when `lazy_load_members` is on. + first_event_by_sender_map: Optional[Dict[str, EventBase]] = None + + # The contribution to the room state from state events in the timeline. + # Only contains the last event for any given state key. + timeline_state: StateMap[str] lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( @@ -902,10 +939,23 @@ class SyncHandler: # We only request state for the members needed to display the # timeline: - members_to_fetch = { - event.sender # FIXME: we also care about invite targets etc. - for event in batch.events - } + timeline_state = {} + + members_to_fetch = set() + first_event_by_sender_map = {} + for event in batch.events: + # Build the map from user IDs to the first timeline event they sent. 
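+ # Any event sent by the user will do here: an event's auth events are + # guaranteed to include the sender's membership (otherwise the event + # would have been rejected), so the first event per sender is enough + # to dig a membership out of later.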
+ if event.sender not in first_event_by_sender_map: + first_event_by_sender_map[event.sender] = event + + # We need the event's sender, unless their membership was in a + # previous timeline event. + if (EventTypes.Member, event.sender) not in timeline_state: + members_to_fetch.add(event.sender) + # FIXME: we also care about invite targets etc. + + if event.is_state(): + timeline_state[(event.type, event.state_key)] = event.event_id if full_state: # always make sure we LL ourselves so we know we're in the room @@ -915,16 +965,21 @@ class SyncHandler: members_to_fetch.add(sync_config.user.to_string()) state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) + + # We are happy to use partial state to compute the `/sync` response. + # Since partial state may not include the lazy-loaded memberships we + # require, we fix up the state response afterwards with memberships from + # auth events. + await_full_state = False else: - state_filter = StateFilter.all() + timeline_state = { + (event.type, event.state_key): event.event_id + for event in batch.events + if event.is_state() + } - # The contribution to the room state from state events in the timeline. - # Only contains the last event for any given state key. - timeline_state = { - (event.type, event.state_key): event.event_id - for event in batch.events - if event.is_state() - } + state_filter = StateFilter.all() + await_full_state = True # Now calculate the state to return in the sync response for the room. # This is more or less the change in state between the end of the previous @@ -936,19 +991,26 @@ class SyncHandler: if batch: state_at_timeline_end = ( await self._state_storage_controller.get_state_ids_for_event( - batch.events[-1].event_id, state_filter=state_filter + batch.events[-1].event_id, + state_filter=state_filter, + await_full_state=await_full_state, ) ) state_at_timeline_start = ( await self._state_storage_controller.get_state_ids_for_event( - batch.events[0].event_id, state_filter=state_filter + batch.events[0].event_id, + state_filter=state_filter, + await_full_state=await_full_state, ) ) else: state_at_timeline_end = await self.get_state_at( - room_id, stream_position=now_token, state_filter=state_filter + room_id, + stream_position=now_token, + state_filter=state_filter, + await_full_state=await_full_state, ) state_at_timeline_start = state_at_timeline_end @@ -964,14 +1026,19 @@ class SyncHandler: if batch: state_at_timeline_start = ( await self._state_storage_controller.get_state_ids_for_event( - batch.events[0].event_id, state_filter=state_filter + batch.events[0].event_id, + state_filter=state_filter, + await_full_state=await_full_state, ) ) else: # We can get here if the user has ignored the senders of all # the recent events. state_at_timeline_start = await self.get_state_at( - room_id, stream_position=now_token, state_filter=state_filter + room_id, + stream_position=now_token, + state_filter=state_filter, + await_full_state=await_full_state, ) # for now, we disable LL for gappy syncs - see @@ -993,20 +1060,28 @@ class SyncHandler: # is indeed the case. 
assert since_token is not None state_at_previous_sync = await self.get_state_at( - room_id, stream_position=since_token, state_filter=state_filter + room_id, + stream_position=since_token, + state_filter=state_filter, + await_full_state=await_full_state, ) if batch: state_at_timeline_end = ( await self._state_storage_controller.get_state_ids_for_event( - batch.events[-1].event_id, state_filter=state_filter + batch.events[-1].event_id, + state_filter=state_filter, + await_full_state=await_full_state, ) ) else: # We can get here if the user has ignored the senders of all # the recent events. state_at_timeline_end = await self.get_state_at( - room_id, stream_position=now_token, state_filter=state_filter + room_id, + stream_position=now_token, + state_filter=state_filter, + await_full_state=await_full_state, ) state_ids = _calculate_state( @@ -1036,8 +1111,23 @@ class SyncHandler: (EventTypes.Member, member) for member in members_to_fetch ), + await_full_state=False, ) + # If we only have partial state for the room, `state_ids` may be missing the + # memberships we wanted. We attempt to find some by digging through the auth + # events of timeline events. + if lazy_load_members and await self.store.is_partial_state_room(room_id): + assert members_to_fetch is not None + assert first_event_by_sender_map is not None + + additional_state_ids = ( + await self._find_missing_partial_state_memberships( + room_id, members_to_fetch, first_event_by_sender_map, state_ids + ) + ) + state_ids = {**state_ids, **additional_state_ids} + # At this point, if `lazy_load_members` is enabled, `state_ids` includes # the memberships of all event senders in the timeline. This is because we # may not have sent the memberships in a previous sync. @@ -1086,6 +1176,99 @@ class SyncHandler: if e.type != EventTypes.Aliases # until MSC2261 or alternative solution } + async def _find_missing_partial_state_memberships( + self, + room_id: str, + members_to_fetch: Collection[str], + events_with_membership_auth: Mapping[str, EventBase], + found_state_ids: StateMap[str], + ) -> StateMap[str]: + """Finds missing memberships from a set of auth events and returns them as a + state map. + + Args: + room_id: The partial state room to find the remaining memberships for. + members_to_fetch: The memberships to find. + events_with_membership_auth: A mapping from user IDs to events whose auth + events are known to contain their membership. + found_state_ids: A dict from (type, state_key) -> state_event_id, containing + memberships that have been previously found. Entries in + `members_to_fetch` that have a membership in `found_state_ids` are + ignored. + + Returns: + A dict from ("m.room.member", state_key) -> state_event_id, containing the + memberships missing from `found_state_ids`. + + Raises: + KeyError: if `events_with_membership_auth` does not have an entry for a + missing membership. Memberships in `found_state_ids` do not need an + entry in `events_with_membership_auth`. + """ + additional_state_ids: MutableStateMap[str] = {} + + # Tracks the missing members for logging purposes. + missing_members = set() + + # Identify memberships missing from `found_state_ids` and pick out the auth + # events in which to look for them. 
+        auth_event_ids: Set[str] = set()
+        for member in members_to_fetch:
+            if (EventTypes.Member, member) in found_state_ids:
+                continue
+
+            missing_members.add(member)
+            event_with_membership_auth = events_with_membership_auth[member]
+            auth_event_ids.update(event_with_membership_auth.auth_event_ids())
+
+        auth_events = await self.store.get_events(auth_event_ids)
+
+        # Run through the missing memberships once more, picking out the memberships
+        # from the pile of auth events we have just fetched.
+        for member in members_to_fetch:
+            if (EventTypes.Member, member) in found_state_ids:
+                continue
+
+            event_with_membership_auth = events_with_membership_auth[member]
+
+            # Dig through the auth events to find the desired membership.
+            for auth_event_id in event_with_membership_auth.auth_event_ids():
+                # We only store events once we have all their auth events,
+                # so the auth event must be in the pile we have just
+                # fetched.
+                auth_event = auth_events[auth_event_id]
+
+                if (
+                    auth_event.type == EventTypes.Member
+                    and auth_event.state_key == member
+                ):
+                    missing_members.remove(member)
+                    additional_state_ids[
+                        (EventTypes.Member, member)
+                    ] = auth_event.event_id
+                    break
+
+        if missing_members:
+            # There really shouldn't be any missing memberships now. Either:
+            # * we couldn't find an auth event, which shouldn't happen because we do
+            #   not persist events without persisting their auth events first, or
+            # * the set of auth events did not contain a membership we wanted, which
+            #   means our caller didn't compute the events in `members_to_fetch`
+            #   correctly, or we somehow accepted an event whose auth events were
+            #   dodgy.
+            logger.error(
+                "Failed to find memberships for %s in partial state room "
+                "%s in the auth events of %s.",
+                missing_members,
+                room_id,
+                [
+                    events_with_membership_auth[member].event_id
+                    for member in missing_members
+                ],
+            )
+
+        return additional_state_ids
+
     async def unread_notifs_for_room_id(
         self, room_id: str, sync_config: SyncConfig
     ) -> NotifCounts:
@@ -1730,7 +1913,11 @@ class SyncHandler:
                 continue
 
             if room_id in sync_result_builder.joined_room_ids or has_join:
-                old_state_ids = await self.get_state_at(room_id, since_token)
+                old_state_ids = await self.get_state_at(
+                    room_id,
+                    since_token,
+                    state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
+                )
                 old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None)
                 old_mem_ev = None
                 if old_mem_ev_id:
@@ -1756,7 +1943,13 @@
                     newly_left_rooms.append(room_id)
             else:
                 if not old_state_ids:
-                    old_state_ids = await self.get_state_at(room_id, since_token)
+                    old_state_ids = await self.get_state_at(
+                        room_id,
+                        since_token,
+                        state_filter=StateFilter.from_types(
+                            [(EventTypes.Member, user_id)]
+                        ),
+                    )
                     old_mem_ev_id = old_state_ids.get(
                         (EventTypes.Member, user_id), None
                     )
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index 1ad002f57b..f9ffd0e29e 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -234,6 +234,7 @@ class StateStorageController:
         self,
         event_ids: Collection[str],
         state_filter: Optional[StateFilter] = None,
+        await_full_state: bool = True,
     ) -> Dict[str, StateMap[str]]:
         """
         Get the state dicts corresponding to a list of events, containing the event_ids
@@ -242,6 +243,9 @@
         Args:
             event_ids: events whose state should be returned
             state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state + at these events and `state_filter` is not satisfied by partial state. + Defaults to `True`. Returns: A dict from event_id -> (type, state_key) -> event_id @@ -250,8 +254,12 @@ class StateStorageController: RuntimeError if we don't have a state group for one or more of the events (ie they are outliers or unknown) """ - await_full_state = True - if state_filter and not state_filter.must_await_full_state(self._is_mine_id): + if ( + await_full_state + and state_filter + and not state_filter.must_await_full_state(self._is_mine_id) + ): + # Full state is not required if the state filter is restrictive enough. await_full_state = False event_to_groups = await self.get_state_group_for_events( @@ -294,7 +302,10 @@ class StateStorageController: @trace async def get_state_ids_for_event( - self, event_id: str, state_filter: Optional[StateFilter] = None + self, + event_id: str, + state_filter: Optional[StateFilter] = None, + await_full_state: bool = True, ) -> StateMap[str]: """ Get the state dict corresponding to a particular event @@ -302,6 +313,9 @@ class StateStorageController: Args: event_id: event whose state should be returned state_filter: The state filter used to fetch state from the database. + await_full_state: if `True`, will block if we do not yet have complete state + at the event and `state_filter` is not satisfied by partial state. + Defaults to `True`. Returns: A dict from (type, state_key) -> state_event_id @@ -311,7 +325,9 @@ class StateStorageController: outlier or is unknown) """ state_map = await self.get_state_ids_for_events( - [event_id], state_filter or StateFilter.all() + [event_id], + state_filter or StateFilter.all(), + await_full_state=await_full_state, ) return state_map[event_id] -- cgit 1.4.1 From 22ea51faf9a29cd9b96189d1fac95eaf2cfcf4ec Mon Sep 17 00:00:00 2001 From: Ayush Anand Date: Thu, 18 Aug 2022 19:44:47 +0530 Subject: Add support for compression to federation responses (#13537) Closes #13415. Signed-off-by: Ayush Anand --- changelog.d/13537.bugfix | 1 + docs/usage/configuration/config_documentation.md | 2 +- synapse/app/homeserver.py | 5 ++++- 3 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13537.bugfix diff --git a/changelog.d/13537.bugfix b/changelog.d/13537.bugfix new file mode 100644 index 0000000000..db843504b1 --- /dev/null +++ b/changelog.d/13537.bugfix @@ -0,0 +1 @@ +Add support for compression to federation responses. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index aa175a0d91..cc72966823 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -444,7 +444,7 @@ Sub-options for each listener include: * `names`: a list of names of HTTP resources. See below for a list of valid resource names. * `compress`: set to true to enable gzip compression on HTTP bodies for this resource. This is currently only supported with the - `client`, `consent` and `metrics` resources. + `client`, `consent`, `metrics` and `federation` resources. * `additional_resources`: Only valid for an 'http' listener. A map of additional endpoints which should be loaded via dynamic modules. 
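For reference, the `homeserver.py` hunk below simply routes the `federation` resource through Synapse's `gz_wrap` helper when `compress` is enabled. As a rough sketch of what such a wrapper can look like using Twisted's stock content-encoding support (that `gz_wrap` is implemented exactly this way is an assumption of this sketch, not something the patch states):

    from twisted.web.resource import EncodingResourceWrapper, Resource
    from twisted.web.server import GzipEncoderFactory

    def gz_wrap(resource: Resource) -> Resource:
        # Content encoding is negotiated per request: clients that advertise
        # gzip via an Accept-Encoding header receive a compressed body, while
        # all other clients still get the plain, identity-encoded response.
        return EncodingResourceWrapper(resource, [GzipEncoderFactory()])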
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 745e704141..d98012adeb 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -220,7 +220,10 @@ class SynapseHomeServer(HomeServer): resources.update({"/_matrix/consent": consent_resource}) if name == "federation": - resources.update({FEDERATION_PREFIX: TransportLayerServer(self)}) + federation_resource: Resource = TransportLayerServer(self) + if compress: + federation_resource = gz_wrap(federation_resource) + resources.update({FEDERATION_PREFIX: federation_resource}) if name == "openid": resources.update( -- cgit 1.4.1 From d64653d062a7fc27782e70c1ca581e85b7730e72 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 Aug 2022 10:05:07 -0500 Subject: Track number of hosts affected by the rate limiter (#13541) Track number of hosts affected by the rate limiter so we can differentiate one really noisy homeserver from a general ratelimit tuning problem across the federation. Follow-up to https://github.com/matrix-org/synapse/pull/13534 Part of https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/13541.misc | 1 + synapse/util/ratelimitutils.py | 43 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13541.misc diff --git a/changelog.d/13541.misc b/changelog.d/13541.misc new file mode 100644 index 0000000000..b488bf74c3 --- /dev/null +++ b/changelog.d/13541.misc @@ -0,0 +1 @@ +Add metrics to track how the rate limiter is affecting requests (sleep/reject). diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 434b02b97b..724d39b92f 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -30,7 +30,7 @@ from synapse.logging.context import ( run_in_background, ) from synapse.logging.opentracing import start_active_span -from synapse.metrics import Histogram +from synapse.metrics import Histogram, LaterGauge from synapse.util import Clock if typing.TYPE_CHECKING: @@ -74,6 +74,27 @@ class FederationRateLimiter: str, "_PerHostRatelimiter" ] = collections.defaultdict(new_limiter) + # We track the number of affected hosts per time-period so we can + # differentiate one really noisy homeserver from a general + # ratelimit tuning problem across the federation. + LaterGauge( + "synapse_rate_limit_sleep_affected_hosts", + "Number of hosts that had requests put to sleep", + [], + lambda: sum( + ratelimiter.should_sleep() for ratelimiter in self.ratelimiters.values() + ), + ) + LaterGauge( + "synapse_rate_limit_reject_affected_hosts", + "Number of hosts that had requests rejected", + [], + lambda: sum( + ratelimiter.should_reject() + for ratelimiter in self.ratelimiters.values() + ), + ) + def ratelimit(self, host: str) -> "_GeneratorContextManager[defer.Deferred[None]]": """Used to ratelimit an incoming request from a given host @@ -139,6 +160,21 @@ class _PerHostRatelimiter: finally: self._on_exit(request_id) + def should_reject(self) -> bool: + """ + Whether to reject the request if we already have too many queued up + (either sleeping or in the ready queue). + """ + queue_size = len(self.ready_request_queue) + len(self.sleeping_requests) + return queue_size > self.reject_limit + + def should_sleep(self) -> bool: + """ + Whether to sleep the request if we already have too many requests coming + through within the window. 
+ """ + return len(self.request_times) > self.sleep_limit + def _on_enter(self, request_id: object) -> "defer.Deferred[None]": time_now = self.clock.time_msec() @@ -149,8 +185,7 @@ class _PerHostRatelimiter: # reject the request if we already have too many queued up (either # sleeping or in the ready queue). - queue_size = len(self.ready_request_queue) + len(self.sleeping_requests) - if queue_size > self.reject_limit: + if self.should_reject(): logger.debug("Ratelimiter(%s): rejecting request", self.host) rate_limit_reject_counter.inc() raise LimitExceededError( @@ -180,7 +215,7 @@ class _PerHostRatelimiter: len(self.request_times), ) - if len(self.request_times) > self.sleep_limit: + if self.should_sleep(): logger.debug( "Ratelimiter(%s) [%s]: sleeping request for %f sec", self.host, -- cgit 1.4.1 From b251cff8196e4130b2a6951c8fe569ed46779443 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 18 Aug 2022 16:26:26 +0100 Subject: Fix incorrect juggling of logging contexts in `_PerHostRatelimiter` (#13554) Signed-off-by: Sean Quah Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/13554.misc | 1 + synapse/util/ratelimitutils.py | 17 +++++++---------- 2 files changed, 8 insertions(+), 10 deletions(-) create mode 100644 changelog.d/13554.misc diff --git a/changelog.d/13554.misc b/changelog.d/13554.misc new file mode 100644 index 0000000000..99dbcebec8 --- /dev/null +++ b/changelog.d/13554.misc @@ -0,0 +1 @@ +Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 724d39b92f..f678b52cb4 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -154,7 +154,9 @@ class _PerHostRatelimiter: self.host = host request_id = object() - ret = self._on_enter(request_id) + # Ideally we'd use `Deferred.fromCoroutine()` here, to save on redundant + # type-checking, but we'd need Twisted >= 21.2. + ret = defer.ensureDeferred(self._on_enter_with_tracing(request_id)) try: yield ret finally: @@ -175,6 +177,10 @@ class _PerHostRatelimiter: """ return len(self.request_times) > self.sleep_limit + async def _on_enter_with_tracing(self, request_id: object) -> None: + with start_active_span("ratelimit wait"), queue_wait_timer.time(): + await self._on_enter(request_id) + def _on_enter(self, request_id: object) -> "defer.Deferred[None]": time_now = self.clock.time_msec() @@ -257,17 +263,8 @@ class _PerHostRatelimiter: # Ensure that we've properly cleaned up. 
self.sleeping_requests.discard(request_id) self.ready_request_queue.pop(request_id, None) - wait_span_scope.__exit__(None, None, None) - wait_timer_cm.__exit__(None, None, None) return r - # Tracing - wait_span_scope = start_active_span("ratelimit wait") - wait_span_scope.__enter__() - # Metrics - wait_timer_cm = queue_wait_timer.time() - wait_timer_cm.__enter__() - ret_defer.addCallbacks(on_start, on_err) ret_defer.addBoth(on_both) return make_deferred_yieldable(ret_defer) -- cgit 1.4.1 From 2c42673a9b8c708a73f49575673c85a32ea32b82 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 Aug 2022 14:15:53 -0500 Subject: Add metrics to track `/messages` response time by room size (#13545) Follow-up to https://github.com/matrix-org/synapse/pull/13533 Part of https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/13545.misc | 1 + synapse/rest/client/room.py | 55 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13545.misc diff --git a/changelog.d/13545.misc b/changelog.d/13545.misc new file mode 100644 index 0000000000..1cdbef179e --- /dev/null +++ b/changelog.d/13545.misc @@ -0,0 +1 @@ +Update metrics to track `/messages` response time by room size. diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index d29417fafc..13bc9482c5 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -16,6 +16,7 @@ """ This module contains REST servlets to do with rooms: /rooms/ """ import logging import re +from enum import Enum from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple from urllib import parse as urlparse @@ -48,6 +49,7 @@ from synapse.http.servlet import ( parse_strings_from_args, ) from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache @@ -62,6 +64,33 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) + +class _RoomSize(Enum): + """ + Enum to differentiate sizes of rooms. This is a pretty good approximation + about how hard it will be to get events in the room. We could also look at + room "complexity". + """ + + # This doesn't necessarily mean the room is a DM, just that there is a DM + # amount of people there. + DM_SIZE = "direct_message_size" + SMALL = "small" + SUBSTANTIAL = "substantial" + LARGE = "large" + + @staticmethod + def from_member_count(member_count: int) -> "_RoomSize": + if member_count <= 2: + return _RoomSize.DM_SIZE + elif member_count < 100: + return _RoomSize.SMALL + elif member_count < 1000: + return _RoomSize.SUBSTANTIAL + else: + return _RoomSize.LARGE + + # This is an extra metric on top of `synapse_http_server_response_time_seconds` # which times the same sort of thing but this one allows us to see values # greater than 10s. We use a separate dedicated histogram with its own buckets @@ -70,7 +99,11 @@ logger = logging.getLogger(__name__) messsages_response_timer = Histogram( "synapse_room_message_list_rest_servlet_response_time_seconds", "sec", - [], + # We have a label for room size so we can try to see a more realistic + # picture of /messages response time for bigger rooms. We don't want the + # tiny rooms that can always respond fast skewing our results when we're trying + # to optimize the bigger cases. 
+    ["room_size"],
     buckets=(
         0.005,
         0.01,
@@ -587,14 +620,26 @@ class RoomMessageListRestServlet(RestServlet):
     def __init__(self, hs: "HomeServer"):
         super().__init__()
         self._hs = hs
+        self.clock = hs.get_clock()
         self.pagination_handler = hs.get_pagination_handler()
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
 
-    @messsages_response_timer.time()
     async def on_GET(
         self, request: SynapseRequest, room_id: str
     ) -> Tuple[int, JsonDict]:
+        processing_start_time = self.clock.time_msec()
+        # Fire off and hope that we get a result by the end.
+        #
+        # We're using the mypy type ignore comment because the `@cached`
+        # decorator on `get_number_joined_users_in_room` doesn't play well with
+        # the type system. Maybe in the future, it can use some ParamSpec
+        # wizardry to fix it up.
+        room_member_count_deferred = run_in_background(  # type: ignore[call-arg]
+            self.store.get_number_joined_users_in_room,
+            room_id,  # type: ignore[arg-type]
+        )
+
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
         pagination_config = await PaginationConfig.from_request(
             self.store, request, default_limit=10
@@ -625,6 +670,12 @@ class RoomMessageListRestServlet(RestServlet):
             event_filter=event_filter,
         )
 
+        processing_end_time = self.clock.time_msec()
+        room_member_count = await make_deferred_yieldable(room_member_count_deferred)
+        messsages_response_timer.labels(
+            room_size=_RoomSize.from_member_count(room_member_count)
+        ).observe((processing_end_time - processing_start_time) / 1000)
+
         return 200, msgs
-- cgit 1.4.1


From 3a245f6cfe3f35f5a37bcd91f3242ef59dc71332 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Fri, 19 Aug 2022 11:03:29 +0000
Subject: Fix validation problem that occurs when a user tries to deactivate
 their account or change their password. (#13563)

---
 changelog.d/13563.feature                 |  1 +
 synapse/rest/client/account.py            |  6 +++---
 tests/handlers/test_deactivate_account.py | 15 +++++++++++++++
 3 files changed, 19 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/13563.feature

diff --git a/changelog.d/13563.feature b/changelog.d/13563.feature
new file mode 100644
index 0000000000..4c39b74289
--- /dev/null
+++ b/changelog.d/13563.feature
@@ -0,0 +1 @@
+Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken).
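The `account.py` hunks below change `body.dict()` to `body.dict(exclude_unset=True)` before the parsed body is handed to the user-interactive auth checks. A small illustration of why that matters, using a hypothetical Pydantic v1 model (not one from the Synapse codebase): without `exclude_unset=True`, fields the client never sent come back as explicit defaults and end up in the auth session data.

    from typing import Optional

    from pydantic import BaseModel

    class DeactivateBody(BaseModel):
        erase: bool = False
        id_server: Optional[str] = None

    body = DeactivateBody.parse_obj({})   # an empty /account/deactivate request
    print(body.dict())                    # {'erase': False, 'id_server': None}
    print(body.dict(exclude_unset=True))  # {} - only what the client actually sent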
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index e5ee63133b..9041e29d6c 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -196,7 +196,7 @@ class PasswordRestServlet(RestServlet): params, session_id = await self.auth_handler.validate_user_via_ui_auth( requester, request, - body.dict(), + body.dict(exclude_unset=True), "modify your account password", ) except InteractiveAuthIncompleteError as e: @@ -219,7 +219,7 @@ class PasswordRestServlet(RestServlet): result, params, session_id = await self.auth_handler.check_ui_auth( [[LoginType.EMAIL_IDENTITY]], request, - body.dict(), + body.dict(exclude_unset=True), "modify your account password", ) except InteractiveAuthIncompleteError as e: @@ -316,7 +316,7 @@ class DeactivateAccountRestServlet(RestServlet): await self.auth_handler.validate_user_via_ui_auth( requester, request, - body.dict(), + body.dict(exclude_unset=True), "deactivate your account", ) result = await self._deactivate_account_handler.deactivate_account( diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index 82baa8f154..7b9b711521 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -322,3 +322,18 @@ class DeactivateAccountTestCase(HomeserverTestCase): ) ), ) + + def test_deactivate_account_needs_auth(self) -> None: + """ + Tests that making a request to /deactivate with an empty body + succeeds in starting the user-interactive auth flow. + """ + req = self.make_request( + "POST", + "account/deactivate", + {}, + access_token=self.token, + ) + + self.assertEqual(req.code, 401, req) + self.assertEqual(req.json_body["flows"], [{"stages": ["m.login.password"]}]) -- cgit 1.4.1 From f3fba4914d63bd9ee0573b61c9631b58ec263070 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 19 Aug 2022 08:25:24 -0400 Subject: Reduce the number of tests using TCP replication. (#13543) Uses Redis replication in additional test cases (instead of TCP replication). A small step towards dropping TCP replication. --- changelog.d/13543.misc | 1 + tests/handlers/test_room_member.py | 4 +- tests/module_api/test_api.py | 7 -- tests/replication/_base.py | 90 ++++++++--------------- tests/replication/tcp/test_handler.py | 4 +- tests/replication/test_sharded_event_persister.py | 7 -- 6 files changed, 36 insertions(+), 77 deletions(-) create mode 100644 changelog.d/13543.misc diff --git a/changelog.d/13543.misc b/changelog.d/13543.misc new file mode 100644 index 0000000000..0300f46cd8 --- /dev/null +++ b/changelog.d/13543.misc @@ -0,0 +1 @@ +Reduce the number of tests using legacy TCP replication. diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index b4e1405aee..1d13ed1e88 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -14,7 +14,7 @@ from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util import Clock -from tests.replication._base import RedisMultiWorkerStreamTestCase +from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import make_request from tests.test_utils import make_awaitable from tests.unittest import FederatingHomeserverTestCase, override_config @@ -216,7 +216,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): # - trying to remote-join again. 
-class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestCase): +class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCase): servlets = [ synapse.rest.admin.register_servlets, synapse.rest.client.login.register_servlets, diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 106159fa65..02cef6f876 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -30,7 +30,6 @@ from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.test_utils import simple_async_mock from tests.test_utils.event_injection import inject_member_event from tests.unittest import HomeserverTestCase, override_config -from tests.utils import USE_POSTGRES_FOR_TESTS class ModuleApiTestCase(HomeserverTestCase): @@ -738,11 +737,6 @@ class ModuleApiTestCase(HomeserverTestCase): class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" - # Testing stream ID replication from the main to worker processes requires postgres - # (due to needing `MultiWriterIdGenerator`). - if not USE_POSTGRES_FOR_TESTS: - skip = "Requires Postgres" - servlets = [ admin.register_servlets, login.register_servlets, @@ -752,7 +746,6 @@ class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): def default_config(self): conf = super().default_config() - conf["redis"] = {"enabled": "true"} conf["stream_writers"] = {"presence": ["presence_writer"]} conf["instance_map"] = { "presence_writer": {"host": "testserv", "port": 1001}, diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 970d5e533b..ce53f808db 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -24,11 +24,11 @@ from synapse.http.site import SynapseRequest, SynapseSite from synapse.replication.http import ReplicationRestResource from synapse.replication.tcp.client import ReplicationDataHandler from synapse.replication.tcp.handler import ReplicationCommandHandler -from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol -from synapse.replication.tcp.resource import ( - ReplicationStreamProtocolFactory, +from synapse.replication.tcp.protocol import ( + ClientReplicationStreamProtocol, ServerReplicationStreamProtocol, ) +from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.server import HomeServer from tests import unittest @@ -220,15 +220,34 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): """Base class for tests running multiple workers. + Enables Redis, providing a fake Redis server. + Automatically handle HTTP replication requests from workers to master, unlike `BaseStreamTestCase`. """ + if not hiredis: + skip = "Requires hiredis" + + if not USE_POSTGRES_FOR_TESTS: + # Redis replication only takes place on Postgres + skip = "Requires Postgres" + + def default_config(self) -> Dict[str, Any]: + """ + Overrides the default config to enable Redis. + Even if the test only uses make_worker_hs, the main process needs Redis + enabled otherwise it won't create a Fake Redis server to listen on the + Redis port and accept fake TCP connections. 
+ """ + base = super().default_config() + base["redis"] = {"enabled": True} + return base + def setUp(self): super().setUp() # build a replication server - self.server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = self.hs.get_replication_streamer() # Fake in memory Redis server that servers can connect to. @@ -247,15 +266,14 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # handling inbound HTTP requests to that instance. self._hs_to_site = {self.hs: self.site} - if self.hs.config.redis.redis_enabled: - # Handle attempts to connect to fake redis server. - self.reactor.add_tcp_client_callback( - "localhost", - 6379, - self.connect_any_redis_attempts, - ) + # Handle attempts to connect to fake redis server. + self.reactor.add_tcp_client_callback( + "localhost", + 6379, + self.connect_any_redis_attempts, + ) - self.hs.get_replication_command_handler().start_replication(self.hs) + self.hs.get_replication_command_handler().start_replication(self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't @@ -339,27 +357,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): store = worker_hs.get_datastores().main store.db_pool._db_pool = self.database_pool._db_pool - # Set up TCP replication between master and the new worker if we don't - # have Redis support enabled. - if not worker_hs.config.redis.redis_enabled: - repl_handler = ReplicationCommandHandler(worker_hs) - client = ClientReplicationStreamProtocol( - worker_hs, - "client", - "test", - self.clock, - repl_handler, - ) - server = self.server_factory.buildProtocol( - IPv4Address("TCP", "127.0.0.1", 0) - ) - - client_transport = FakeTransport(server, self.reactor) - client.makeConnection(client_transport) - - server_transport = FakeTransport(client, self.reactor) - server.makeConnection(server_transport) - # Set up a resource for the worker resource = ReplicationRestResource(worker_hs) @@ -378,8 +375,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): reactor=self.reactor, ) - if worker_hs.config.redis.redis_enabled: - worker_hs.get_replication_command_handler().start_replication(worker_hs) + worker_hs.get_replication_command_handler().start_replication(worker_hs) return worker_hs @@ -582,27 +578,3 @@ class FakeRedisPubSubProtocol(Protocol): def connectionLost(self, reason): self._server.remove_subscriber(self) - - -class RedisMultiWorkerStreamTestCase(BaseMultiWorkerStreamTestCase): - """ - A test case that enables Redis, providing a fake Redis server. - """ - - if not hiredis: - skip = "Requires hiredis" - - if not USE_POSTGRES_FOR_TESTS: - # Redis replication only takes place on Postgres - skip = "Requires Postgres" - - def default_config(self) -> Dict[str, Any]: - """ - Overrides the default config to enable Redis. - Even if the test only uses make_worker_hs, the main process needs Redis - enabled otherwise it won't create a Fake Redis server to listen on the - Redis port and accept fake TCP connections. - """ - base = super().default_config() - base["redis"] = {"enabled": True} - return base diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py index e6a19eafd5..1e299d2d67 100644 --- a/tests/replication/tcp/test_handler.py +++ b/tests/replication/tcp/test_handler.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.replication._base import RedisMultiWorkerStreamTestCase +from tests.replication._base import BaseMultiWorkerStreamTestCase -class ChannelsTestCase(RedisMultiWorkerStreamTestCase): +class ChannelsTestCase(BaseMultiWorkerStreamTestCase): def test_subscribed_to_enough_redis_channels(self) -> None: # The default main process is subscribed to the USER_IP channel. self.assertCountEqual( diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index a7ca68069e..541d390286 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -20,7 +20,6 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import make_request -from tests.utils import USE_POSTGRES_FOR_TESTS logger = logging.getLogger(__name__) @@ -28,11 +27,6 @@ logger = logging.getLogger(__name__) class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): """Checks event persisting sharding works""" - # Event persister sharding requires postgres (due to needing - # `MultiWriterIdGenerator`). - if not USE_POSTGRES_FOR_TESTS: - skip = "Requires Postgres" - servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -50,7 +44,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): def default_config(self): conf = super().default_config() - conf["redis"] = {"enabled": "true"} conf["stream_writers"] = {"events": ["worker1", "worker2"]} conf["instance_map"] = { "worker1": {"host": "testserv", "port": 1001}, -- cgit 1.4.1 From 40e3e68cd7c4ab6bf6d7ae228bacfb0c8ff6fa38 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 19 Aug 2022 16:52:20 +0100 Subject: Register homeserver modules when creating test homeserver (#13558) --- changelog.d/13558.misc | 1 + tests/events/test_presence_router.py | 4 ---- tests/handlers/test_password_providers.py | 11 ----------- tests/handlers/test_register.py | 7 ------- tests/server.py | 14 ++++++++++++++ 5 files changed, 15 insertions(+), 22 deletions(-) create mode 100644 changelog.d/13558.misc diff --git a/changelog.d/13558.misc b/changelog.d/13558.misc new file mode 100644 index 0000000000..e3f3e31740 --- /dev/null +++ b/changelog.d/13558.misc @@ -0,0 +1 @@ +Make `HomeServerTestCase` load any configured homeserver modules automatically. 
\ No newline at end of file diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index ffc3012a86..685a9a6d52 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -141,10 +141,6 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase): hs = self.setup_test_homeserver( federation_transport_client=fed_transport_client, ) - # Load the modules into the homeserver - module_api = hs.get_module_api() - for module, config in hs.config.modules.loaded_modules: - module(config=config, api=module_api) load_legacy_presence_router(hs) diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 4c62449c89..75934b1707 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -21,7 +21,6 @@ from unittest.mock import Mock import synapse from synapse.api.constants import LoginType from synapse.api.errors import Codes -from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.module_api import ModuleApi from synapse.rest.client import account, devices, login, logout, register from synapse.types import JsonDict, UserID @@ -167,16 +166,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() super().setUp() - def make_homeserver(self, reactor, clock): - hs = self.setup_test_homeserver() - # Load the modules into the homeserver - module_api = hs.get_module_api() - for module, config in hs.config.modules.loaded_modules: - module(config=config, api=module_api) - load_legacy_password_auth_providers(hs) - - return hs - @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_progiver_login_legacy(self): self.password_only_auth_provider_login_test_body() diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 23f35d5bf5..86b3d51975 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -22,7 +22,6 @@ from synapse.api.errors import ( ResourceLimitError, SynapseError, ) -from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, RoomID, UserID, create_requester @@ -144,12 +143,6 @@ class RegistrationTestCase(unittest.HomeserverTestCase): config=hs_config, federation_client=self.mock_federation_client ) - load_legacy_spam_checkers(hs) - - module_api = hs.get_module_api() - for module, config in hs.config.modules.loaded_modules: - module(config=config, api=module_api) - return hs def prepare(self, reactor, clock, hs): diff --git a/tests/server.py b/tests/server.py index 9689e6a0cd..c447d5e4c4 100644 --- a/tests/server.py +++ b/tests/server.py @@ -61,6 +61,10 @@ from twisted.web.resource import IResource from twisted.web.server import Request, Site from synapse.config.database import DatabaseConnectionConfig +from synapse.events.presence_router import load_legacy_presence_router +from synapse.events.spamcheck import load_legacy_spam_checkers +from synapse.events.third_party_rules import load_legacy_third_party_event_rules +from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseRequest from synapse.logging.context import ContextResourceUsage from synapse.server import HomeServer @@ -913,4 +917,14 @@ def setup_test_homeserver( # Make the threadpool and database transactions synchronous for testing. 
_make_test_homeserver_synchronous(hs) + # Load any configured modules into the homeserver + module_api = hs.get_module_api() + for module, config in hs.config.modules.loaded_modules: + module(config=config, api=module_api) + + load_legacy_spam_checkers(hs) + load_legacy_third_party_event_rules(hs) + load_legacy_presence_router(hs) + load_legacy_password_auth_providers(hs) + return hs -- cgit 1.4.1 From f9f03426de338ae1879e174f63adf698bbfc3a4b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 19 Aug 2022 17:17:10 +0100 Subject: Implement MSC3852: Expose `last_seen_user_agent` to users for their own devices; also expose to Admin API (#13549) --- changelog.d/13549.feature | 1 + changelog.d/13549.misc | 1 + docs/admin_api/user_admin_api.md | 7 +++ synapse/config/experimental.py | 3 ++ synapse/handlers/device.py | 9 +++- synapse/rest/client/devices.py | 27 ++++++++++++ tests/rest/admin/test_user.py | 92 +++++++++++++++++++++++++++++++++++++++- tests/unittest.py | 15 +++++++ 8 files changed, 153 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13549.feature create mode 100644 changelog.d/13549.misc diff --git a/changelog.d/13549.feature b/changelog.d/13549.feature new file mode 100644 index 0000000000..b6a726789c --- /dev/null +++ b/changelog.d/13549.feature @@ -0,0 +1 @@ +Add an experimental implementation for [MSC3852](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). \ No newline at end of file diff --git a/changelog.d/13549.misc b/changelog.d/13549.misc new file mode 100644 index 0000000000..5b4303e87e --- /dev/null +++ b/changelog.d/13549.misc @@ -0,0 +1 @@ +Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 0871cfebf5..c1ca0c8a64 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -753,6 +753,7 @@ A response body like the following is returned: "device_id": "QBUAZIFURK", "display_name": "android", "last_seen_ip": "1.2.3.4", + "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0", "last_seen_ts": 1474491775024, "user_id": "" }, @@ -760,6 +761,7 @@ A response body like the following is returned: "device_id": "AUIECTSRND", "display_name": "ios", "last_seen_ip": "1.2.3.5", + "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0", "last_seen_ts": 1474491775025, "user_id": "" } @@ -786,6 +788,8 @@ The following fields are returned in the JSON response body: Absent if no name has been set. - `last_seen_ip` - The IP address where this device was last seen. (May be a few minutes out of date, for efficiency reasons). + - `last_seen_user_agent` - The user agent of the device when it was last seen. + (May be a few minutes out of date, for efficiency reasons). - `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this devices was last seen. (May be a few minutes out of date, for efficiency reasons). - `user_id` - Owner of device. @@ -837,6 +841,7 @@ A response body like the following is returned: "device_id": "", "display_name": "android", "last_seen_ip": "1.2.3.4", + "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0", "last_seen_ts": 1474491775024, "user_id": "" } @@ -858,6 +863,8 @@ The following fields are returned in the JSON response body: Absent if no name has been set. 
- `last_seen_ip` - The IP address where this device was last seen. (May be a few minutes out of date, for efficiency reasons). + - `last_seen_user_agent` - The user agent of the device when it was last seen. + (May be a few minutes out of date, for efficiency reasons). - `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this devices was last seen. (May be a few minutes out of date, for efficiency reasons). - `user_id` - Owner of device. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 7d17c958bb..c1ff417539 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -90,3 +90,6 @@ class ExperimentalConfig(Config): # MSC3848: Introduce errcodes for specific event sending failures self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False) + + # MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices. + self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 1a8379854c..f5c586f657 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -74,6 +74,7 @@ class DeviceWorkerHandler: self._state_storage = hs.get_storage_controllers().state self._auth_handler = hs.get_auth_handler() self.server_name = hs.hostname + self._msc3852_enabled = hs.config.experimental.msc3852_enabled @trace async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: @@ -747,7 +748,13 @@ def _update_device_from_client_ips( device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]] ) -> None: ip = client_ips.get((device["user_id"], device["device_id"]), {}) - device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")}) + device.update( + { + "last_seen_user_agent": ip.get("user_agent"), + "last_seen_ts": ip.get("last_seen"), + "last_seen_ip": ip.get("ip"), + } + ) class DeviceListUpdater: diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 6fab102437..ed6ce78d47 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -42,12 +42,26 @@ class DevicesRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() + self._msc3852_enabled = hs.config.experimental.msc3852_enabled async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) devices = await self.device_handler.get_devices_by_user( requester.user.to_string() ) + + # If MSC3852 is disabled, then the "last_seen_user_agent" field will be + # removed from each device. If it is enabled, then the field name will + # be replaced by the unstable identifier. + # + # When MSC3852 is accepted, this block of code can just be removed to + # expose "last_seen_user_agent" to clients. 
+ for device in devices: + last_seen_user_agent = device["last_seen_user_agent"] + del device["last_seen_user_agent"] + if self._msc3852_enabled: + device["org.matrix.msc3852.last_seen_user_agent"] = last_seen_user_agent + return 200, {"devices": devices} @@ -108,6 +122,7 @@ class DeviceRestServlet(RestServlet): self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() self.auth_handler = hs.get_auth_handler() + self._msc3852_enabled = hs.config.experimental.msc3852_enabled async def on_GET( self, request: SynapseRequest, device_id: str @@ -118,6 +133,18 @@ class DeviceRestServlet(RestServlet): ) if device is None: raise NotFoundError("No device found") + + # If MSC3852 is disabled, then the "last_seen_user_agent" field will be + # removed from each device. If it is enabled, then the field name will + # be replaced by the unstable identifier. + # + # When MSC3852 is accepted, this block of code can just be removed to + # expose "last_seen_user_agent" to clients. + last_seen_user_agent = device["last_seen_user_agent"] + del device["last_seen_user_agent"] + if self._msc3852_enabled: + device["org.matrix.msc3852.last_seen_user_agent"] = last_seen_user_agent + return 200, device @interactive_auth_handler diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 411e4ec005..1afd082707 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1,4 +1,4 @@ -# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# Copyright 2018-2022 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -904,6 +904,96 @@ class UsersListTestCase(unittest.HomeserverTestCase): ) +class UserDevicesTestCase(unittest.HomeserverTestCase): + """ + Tests user device management-related Admin APIs. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + # Set up an Admin user to query the Admin API with. + self.admin_user_id = self.register_user("admin", "pass", admin=True) + self.admin_user_token = self.login("admin", "pass") + + # Set up a test user to query the devices of. + self.other_user_device_id = "TESTDEVICEID" + self.other_user_device_display_name = "My Test Device" + self.other_user_client_ip = "1.2.3.4" + self.other_user_user_agent = "EquestriaTechnology/123.0" + + self.other_user_id = self.register_user("user", "pass", displayname="User1") + self.other_user_token = self.login( + "user", + "pass", + device_id=self.other_user_device_id, + additional_request_fields={ + "initial_device_display_name": self.other_user_device_display_name, + }, + ) + + # Have the "other user" make a request so that the "last_seen_*" fields are + # populated in the tests below. + channel = self.make_request( + "GET", + "/_matrix/client/v3/sync", + access_token=self.other_user_token, + client_ip=self.other_user_client_ip, + custom_headers=[ + ("User-Agent", self.other_user_user_agent), + ], + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + + def test_list_user_devices(self) -> None: + """ + Tests that a user's devices and attributes are listed correctly via the Admin API. 
+        """
+        # Request all devices of "other user"
+        channel = self.make_request(
+            "GET",
+            f"/_synapse/admin/v2/users/{self.other_user_id}/devices",
+            access_token=self.admin_user_token,
+        )
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        # Double-check we got the single device expected
+        user_devices = channel.json_body["devices"]
+        self.assertEqual(len(user_devices), 1)
+        self.assertEqual(channel.json_body["total"], 1)
+
+        # Check that all the attributes of the device reported are as expected.
+        self._validate_attributes_of_device_response(user_devices[0])
+
+        # Request just a single device for "other user" by its ID
+        channel = self.make_request(
+            "GET",
+            f"/_synapse/admin/v2/users/{self.other_user_id}/devices/"
+            f"{self.other_user_device_id}",
+            access_token=self.admin_user_token,
+        )
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        # Check that all the attributes of the device reported are as expected.
+        self._validate_attributes_of_device_response(channel.json_body)
+
+    def _validate_attributes_of_device_response(self, response: JsonDict) -> None:
+        # Check that all expected device attributes are present
+        self.assertEqual(response["user_id"], self.other_user_id)
+        self.assertEqual(response["device_id"], self.other_user_device_id)
+        self.assertEqual(response["display_name"], self.other_user_device_display_name)
+        self.assertEqual(response["last_seen_ip"], self.other_user_client_ip)
+        self.assertEqual(response["last_seen_user_agent"], self.other_user_user_agent)
+        self.assertIsInstance(response["last_seen_ts"], int)
+        self.assertGreater(response["last_seen_ts"], 0)
+
+
 class DeactivateAccountTestCase(unittest.HomeserverTestCase):
     servlets = [
diff --git a/tests/unittest.py b/tests/unittest.py
index bec4a3d023..975b0a23a7 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -677,14 +677,29 @@ class HomeserverTestCase(TestCase):
         username: str,
         password: str,
         device_id: Optional[str] = None,
+        additional_request_fields: Optional[Dict[str, str]] = None,
         custom_headers: Optional[Iterable[CustomHeaderType]] = None,
     ) -> str:
         """
         Log in a user, and get an access token. Requires the Login API be registered.
+
+        Args:
+            username: The localpart of the user to log in as.
+            password: The password of the user to log in as.
+            device_id: An optional device ID to assign to the new device created during
+                login.
+            additional_request_fields: A dictionary containing any additional /login
+                request fields and their values.
+            custom_headers: Custom HTTP headers and values to add to the /login request.
+
+        Returns:
+            The access token for the newly logged-in user.
""" body = {"type": "m.login.password", "user": username, "password": password} if device_id: body["device_id"] = device_id + if additional_request_fields: + body.update(additional_request_fields) channel = self.make_request( "POST", -- cgit 1.4.1 From 06df5d4250f54d5a95b0c90bfc9352ec6f02c520 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 19 Aug 2022 15:37:01 -0500 Subject: MSC2716v4 room version - remove namespace from MSC2716 event content fields (#13551) Complement PR: https://github.com/matrix-org/complement/pull/450 As suggested in https://github.com/matrix-org/matrix-spec-proposals/pull/2716#discussion_r941444525 --- changelog.d/13551.feature | 1 + synapse/api/constants.py | 6 +++--- synapse/api/room_versions.py | 38 ++++++++++++++++++------------------ synapse/events/utils.py | 2 +- synapse/handlers/federation_event.py | 2 +- 5 files changed, 25 insertions(+), 24 deletions(-) create mode 100644 changelog.d/13551.feature diff --git a/changelog.d/13551.feature b/changelog.d/13551.feature new file mode 100644 index 0000000000..365673a3c1 --- /dev/null +++ b/changelog.d/13551.feature @@ -0,0 +1 @@ +Add `org.matrix.msc2716v4` experimental room version with updated content fields. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 1d46fb0e43..c73aea622a 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -216,11 +216,11 @@ class EventContentFields: MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical" # For "insertion" events to indicate what the next batch ID should be in # order to connect to it - MSC2716_NEXT_BATCH_ID: Final = "org.matrix.msc2716.next_batch_id" + MSC2716_NEXT_BATCH_ID: Final = "next_batch_id" # Used on "batch" events to indicate which insertion event it connects to - MSC2716_BATCH_ID: Final = "org.matrix.msc2716.batch_id" + MSC2716_BATCH_ID: Final = "batch_id" # For "marker" events - MSC2716_MARKER_INSERTION: Final = "org.matrix.msc2716.marker.insertion" + MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference" # The authorising user for joining a restricted room. 
AUTHORISING_USER: Final = "join_authorised_via_users_server" diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 00e81b3afc..a0e4ab6db6 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -269,24 +269,6 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, ) - MSC2716v3 = RoomVersion( - "org.matrix.msc2716v3", - RoomDisposition.UNSTABLE, - EventFormatVersions.V3, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=True, - msc2716_historical=True, - msc2716_redactions=True, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - ) MSC3787 = RoomVersion( "org.matrix.msc3787", RoomDisposition.UNSTABLE, @@ -323,6 +305,24 @@ class RoomVersions: msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, ) + MSC2716v4 = RoomVersion( + "org.matrix.msc2716v4", + RoomDisposition.UNSTABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2176_redaction_rules=False, + msc3083_join_rules=False, + msc3375_redaction_rules=False, + msc2403_knocking=True, + msc2716_historical=True, + msc2716_redactions=True, + msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, + ) KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { @@ -338,9 +338,9 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.V7, RoomVersions.V8, RoomVersions.V9, - RoomVersions.MSC2716v3, RoomVersions.MSC3787, RoomVersions.V10, + RoomVersions.MSC2716v4, ) } diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ac91c5eb57..71853caad8 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -161,7 +161,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH: add_fields(EventContentFields.MSC2716_BATCH_ID) elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER: - add_fields(EventContentFields.MSC2716_MARKER_INSERTION) + add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE) allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys} diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index f40b071a74..32326975a1 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1384,7 +1384,7 @@ class FederationEventHandler: logger.debug("_handle_marker_event: received %s", marker_event) insertion_event_id = marker_event.content.get( - EventContentFields.MSC2716_MARKER_INSERTION + EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE ) if insertion_event_id is None: -- cgit 1.4.1 From 94375f7a913f75fe0a93a3eda2bfe5060e975290 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 22 Aug 2022 10:03:11 +0100 Subject: Remove redundant opentracing spans for `/sendToDevice` and `/keys/upload` (#13574) --- changelog.d/13574.bugfix | 1 + synapse/rest/client/keys.py | 3 +-- synapse/rest/client/sendtodevice.py | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13574.bugfix diff 
--git a/changelog.d/13574.bugfix b/changelog.d/13574.bugfix
new file mode 100644
index 0000000000..3899c137aa
--- /dev/null
+++ b/changelog.d/13574.bugfix
@@ -0,0 +1 @@
+Fix the `opentracing.force_tracing_for_users` config option not applying to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests.
\ No newline at end of file
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index e3f454896a..a395694fa5 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -26,7 +26,7 @@ from synapse.http.servlet import (
     parse_string,
 )
 from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname
+from synapse.logging.opentracing import log_kv, set_tag
 from synapse.types import JsonDict, StreamToken
 
 from ._base import client_patterns, interactive_auth_handler
@@ -71,7 +71,6 @@ class KeyUploadServlet(RestServlet):
         self.e2e_keys_handler = hs.get_e2e_keys_handler()
         self.device_handler = hs.get_device_handler()
 
-    @trace_with_opname("upload_keys")
     async def on_POST(
         self, request: SynapseRequest, device_id: Optional[str]
     ) -> Tuple[int, JsonDict]:
diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py
index 1a8e9a96d4..46a8b03829 100644
--- a/synapse/rest/client/sendtodevice.py
+++ b/synapse/rest/client/sendtodevice.py
@@ -19,7 +19,7 @@ from synapse.http import servlet
 from synapse.http.server import HttpServer
 from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
 from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import set_tag, trace_with_opname
+from synapse.logging.opentracing import set_tag
 from synapse.rest.client.transactions import HttpTransactionCache
 from synapse.types import JsonDict
@@ -43,7 +43,6 @@ class SendToDeviceRestServlet(servlet.RestServlet):
         self.txns = HttpTransactionCache(hs)
         self.device_message_handler = hs.get_device_message_handler()
 
-    @trace_with_opname("sendToDevice")
     def on_PUT(
         self, request: SynapseRequest, message_type: str, txn_id: str
     ) -> Awaitable[Tuple[int, JsonDict]]:
-- cgit 1.4.1


From 3dd175b628bab5638165f20de9eade36a4e88147 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Mon, 22 Aug 2022 15:17:59 +0200
Subject: `synapse.api.auth.Auth` cleanup: make permission-related methods use
 `Requester` instead of the `UserID` (#13024)

Part of #13019

This changes all the permission-related methods to rely on the Requester
instead of the UserID. This is a first step towards enabling scoped access
tokens at some point, since I expect the Requester to have scope-related
information in it.

It also changes methods which figure out the user/device/appservice from the
access token to return a Requester instead of something else. This avoids
having store-related objects in the method signatures.
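As a hedged sketch of the calling convention this moves towards (the handler function below is invented for illustration; `create_requester` and the new `check_user_in_room` signature are the real names used in the diff that follows):

    from synapse.types import Requester, create_requester

    async def example_handler(auth, room_id: str, requester: Requester) -> None:
        # Permission checks now take the whole Requester, so the user ID, the
        # device ID and any appservice acting on the user's behalf travel
        # together and cannot drift out of sync with the access token.
        await auth.check_user_in_room(room_id, requester)

    # e.g. in tests, a Requester can be built directly from a user ID:
    requester = create_requester("@alice:example.com", device_id="ABCDEFGHIJ")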
--- changelog.d/13024.misc | 1 + synapse/api/auth.py | 202 +++++++++++------------ synapse/handlers/auth.py | 17 +- synapse/handlers/directory.py | 24 ++- synapse/handlers/initial_sync.py | 6 +- synapse/handlers/message.py | 23 +-- synapse/handlers/pagination.py | 2 +- synapse/handlers/register.py | 15 +- synapse/handlers/relations.py | 2 +- synapse/handlers/room.py | 4 +- synapse/handlers/room_member.py | 10 +- synapse/handlers/typing.py | 10 +- synapse/http/site.py | 2 +- synapse/rest/admin/_base.py | 10 +- synapse/rest/admin/media.py | 6 +- synapse/rest/admin/rooms.py | 12 +- synapse/rest/admin/users.py | 15 +- synapse/rest/client/profile.py | 4 +- synapse/rest/client/register.py | 3 - synapse/rest/client/room.py | 13 +- synapse/server_notices/server_notices_manager.py | 2 +- synapse/storage/databases/main/registration.py | 2 +- tests/api/test_auth.py | 8 +- tests/handlers/test_typing.py | 8 +- tests/rest/client/test_retention.py | 4 +- tests/rest/client/test_shadow_banned.py | 6 +- 26 files changed, 203 insertions(+), 208 deletions(-) create mode 100644 changelog.d/13024.misc diff --git a/changelog.d/13024.misc b/changelog.d/13024.misc new file mode 100644 index 0000000000..aa43c82429 --- /dev/null +++ b/changelog.d/13024.misc @@ -0,0 +1 @@ +Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 523bad0c55..9a1aea083f 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -37,8 +37,7 @@ from synapse.logging.opentracing import ( start_active_span, trace, ) -from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import Requester, UserID, create_requester +from synapse.types import Requester, create_requester if TYPE_CHECKING: from synapse.server import HomeServer @@ -70,14 +69,14 @@ class Auth: async def check_user_in_room( self, room_id: str, - user_id: str, + requester: Requester, allow_departed_users: bool = False, ) -> Tuple[str, Optional[str]]: """Check if the user is in the room, or was at some point. Args: room_id: The room to check. - user_id: The user to check. + requester: The user making the request, according to the access token. current_state: Optional map of the current state of the room. If provided then that map is used to check whether they are a @@ -94,6 +93,7 @@ class Auth: membership event ID of the user. 
""" + user_id = requester.user.to_string() ( membership, member_event_id, @@ -182,96 +182,69 @@ class Auth: access_token = self.get_access_token_from_request(request) - ( - user_id, - device_id, - app_service, - ) = await self._get_appservice_user_id_and_device_id(request) - if user_id and app_service: - if ip_addr and self._track_appservice_user_ips: - await self.store.insert_client_ip( - user_id=user_id, - access_token=access_token, - ip=ip_addr, - user_agent=user_agent, - device_id="dummy-device" - if device_id is None - else device_id, # stubbed - ) - - requester = create_requester( - user_id, app_service=app_service, device_id=device_id + # First check if it could be a request from an appservice + requester = await self._get_appservice_user(request) + if not requester: + # If not, it should be from a regular user + requester = await self.get_user_by_access_token( + access_token, allow_expired=allow_expired ) - request.requester = user_id - return requester - - user_info = await self.get_user_by_access_token( - access_token, allow_expired=allow_expired - ) - token_id = user_info.token_id - is_guest = user_info.is_guest - shadow_banned = user_info.shadow_banned - - # Deny the request if the user account has expired. - if not allow_expired: - if await self._account_validity_handler.is_user_expired( - user_info.user_id - ): - # Raise the error if either an account validity module has determined - # the account has expired, or the legacy account validity - # implementation is enabled and determined the account has expired - raise AuthError( - 403, - "User account has expired", - errcode=Codes.EXPIRED_ACCOUNT, - ) - - device_id = user_info.device_id - - if access_token and ip_addr: + # Deny the request if the user account has expired. + # This check is only done for regular users, not appservice ones. 
+ if not allow_expired: + if await self._account_validity_handler.is_user_expired( + requester.user.to_string() + ): + # Raise the error if either an account validity module has determined + # the account has expired, or the legacy account validity + # implementation is enabled and determined the account has expired + raise AuthError( + 403, + "User account has expired", + errcode=Codes.EXPIRED_ACCOUNT, + ) + + if ip_addr and ( + not requester.app_service or self._track_appservice_user_ips + ): + # XXX(quenting): I'm 95% confident that we could skip setting the + # device_id to "dummy-device" for appservices, and that the only impact + # would be some rows which would not deduplicate in the 'user_ips' + # table during the transition + recorded_device_id = ( + "dummy-device" + if requester.device_id is None and requester.app_service is not None + else requester.device_id + ) await self.store.insert_client_ip( - user_id=user_info.token_owner, + user_id=requester.authenticated_entity, access_token=access_token, ip=ip_addr, user_agent=user_agent, - device_id=device_id, + device_id=recorded_device_id, ) + # Also track the puppeted user's client IP if enabled and the user is puppeting if ( - user_info.user_id != user_info.token_owner + requester.user.to_string() != requester.authenticated_entity and self._track_puppeted_user_ips ): await self.store.insert_client_ip( - user_id=user_info.user_id, + user_id=requester.user.to_string(), access_token=access_token, ip=ip_addr, user_agent=user_agent, - device_id=device_id, + device_id=requester.device_id, ) - if is_guest and not allow_guest: + if requester.is_guest and not allow_guest: raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN, ) - # Mark the token as used. This is used to invalidate old refresh - # tokens after some time. - if not user_info.token_used and token_id is not None: - await self.store.mark_access_token_as_used(token_id) - - requester = create_requester( - user_info.user_id, - token_id, - is_guest, - shadow_banned, - device_id, - app_service=app_service, - authenticated_entity=user_info.token_owner, - ) - request.requester = requester return requester except KeyError: @@ -308,9 +281,7 @@ class Auth: 403, "Application service has not registered this user (%s)" % user_id ) - async def _get_appservice_user_id_and_device_id( - self, request: Request - ) -> Tuple[Optional[str], Optional[str], Optional[ApplicationService]]: + async def _get_appservice_user(self, request: Request) -> Optional[Requester]: """ Given a request, reads the request parameters to determine: - whether it's an application service that's making this request @@ -325,15 +296,13 @@ class Auth: Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. Returns: - 3-tuple of - (user ID?, device ID?, application service?) + the application service `Requester` of that request Postconditions: - - If an application service is returned, so is a user ID - - A user ID is never returned without an application service - - A device ID is never returned without a user ID or an application service - - The returned application service, if present, is permitted to control the - returned user ID. + - The `app_service` field in the returned `Requester` is set + - The `user_id` field in the returned `Requester` is either the application + service sender or the controlled user set by the `user_id` URI parameter + - The returned application service is permitted to control the returned user ID.
- The returned device ID, if present, has been checked to be a valid device ID for the returned user ID. """ @@ -343,12 +312,12 @@ class Auth: self.get_access_token_from_request(request) ) if app_service is None: - return None, None, None + return None if app_service.ip_range_whitelist: ip_address = IPAddress(request.getClientAddress().host) if ip_address not in app_service.ip_range_whitelist: - return None, None, None + return None # This will always be set by the time Twisted calls us. assert request.args is not None @@ -382,13 +351,15 @@ class Auth: Codes.EXCLUSIVE, ) - return effective_user_id, effective_device_id, app_service + return create_requester( + effective_user_id, app_service=app_service, device_id=effective_device_id + ) async def get_user_by_access_token( self, token: str, allow_expired: bool = False, - ) -> TokenLookupResult: + ) -> Requester: """Validate access token and get user_id from it Args: @@ -405,9 +376,9 @@ class Auth: # First look in the database to see if the access token is present # as an opaque token. - r = await self.store.get_user_by_access_token(token) - if r: - valid_until_ms = r.valid_until_ms + user_info = await self.store.get_user_by_access_token(token) + if user_info: + valid_until_ms = user_info.valid_until_ms if ( not allow_expired and valid_until_ms is not None @@ -419,7 +390,20 @@ class Auth: msg="Access token has expired", soft_logout=True ) - return r + # Mark the token as used. This is used to invalidate old refresh + # tokens after some time. + await self.store.mark_access_token_as_used(user_info.token_id) + + requester = create_requester( + user_id=user_info.user_id, + access_token_id=user_info.token_id, + is_guest=user_info.is_guest, + shadow_banned=user_info.shadow_banned, + device_id=user_info.device_id, + authenticated_entity=user_info.token_owner, + ) + + return requester # If the token isn't found in the database, then it could still be a # macaroon for a guest, so we check that here. @@ -445,11 +429,12 @@ class Auth: "Guest access token used for regular user" ) - return TokenLookupResult( + return create_requester( user_id=user_id, is_guest=True, # all guests get the same device id device_id=GUEST_DEVICE_ID, + authenticated_entity=user_id, ) except ( pymacaroons.exceptions.MacaroonException, @@ -472,32 +457,33 @@ class Auth: request.requester = create_requester(service.sender, app_service=service) return service - async def is_server_admin(self, user: UserID) -> bool: + async def is_server_admin(self, requester: Requester) -> bool: """Check if the given user is a local server admin. Args: - user: user to check + requester: The user making the request, according to the access token. Returns: True if the user is an admin """ - return await self.store.is_server_admin(user) + return await self.store.is_server_admin(requester.user) - async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool: + async def check_can_change_room_list( + self, room_id: str, requester: Requester + ) -> bool: """Determine whether the user is allowed to edit the room's entry in the published room list. Args: - room_id - user + room_id: The room to check. + requester: The user making the request, according to the access token. """ - is_admin = await self.is_server_admin(user) + is_admin = await self.is_server_admin(requester) if is_admin: return True - user_id = user.to_string() - await self.check_user_in_room(room_id, user_id) + await self.check_user_in_room(room_id, requester) # We currently require the user is a "moderator" in the room. 
We do this # by checking if they would (theoretically) be able to change the @@ -516,7 +502,9 @@ class Auth: send_level = event_auth.get_send_level( EventTypes.CanonicalAlias, "", power_level_event ) - user_level = event_auth.get_user_power_level(user_id, auth_events) + user_level = event_auth.get_user_power_level( + requester.user.to_string(), auth_events + ) return user_level >= send_level @@ -574,16 +562,16 @@ class Auth: @trace async def check_user_in_room_or_world_readable( - self, room_id: str, user_id: str, allow_departed_users: bool = False + self, room_id: str, requester: Requester, allow_departed_users: bool = False ) -> Tuple[str, Optional[str]]: """Checks that the user is or was in the room or the room is world readable. If it isn't then an exception is raised. Args: - room_id: room to check - user_id: user to check - allow_departed_users: if True, accept users that were previously - members but have now departed + room_id: The room to check. + requester: The user making the request, according to the access token. + allow_departed_users: If True, accept users that were previously + members but have now departed. Returns: Resolves to the current membership of the user in the room and the @@ -598,7 +586,7 @@ class Auth: # * The user is a guest user, and has joined the room # else it will throw. return await self.check_user_in_room( - room_id, user_id, allow_departed_users=allow_departed_users + room_id, requester, allow_departed_users=allow_departed_users ) except AuthError: visibility = await self._storage_controllers.state.get_current_state_event( @@ -613,6 +601,6 @@ class Auth: raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" - % (user_id, room_id), + % (requester.user, room_id), errcode=Codes.NOT_JOINED, ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index bfa5535044..0327fc57a4 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -280,7 +280,7 @@ class AuthHandler: that it isn't stolen by re-authenticating them. Args: - requester: The user, as given by the access token + requester: The user making the request, according to the access token. request: The request sent by the client. 
@@ -1435,20 +1435,25 @@ class AuthHandler: access_token: access token to be deleted """ - user_info = await self.auth.get_user_by_access_token(access_token) + token = await self.store.get_user_by_access_token(access_token) + if not token: + # At this point, the token should already have been fetched once by + # the caller, so this should not happen, unless there is a race condition + # between two delete requests + raise SynapseError(HTTPStatus.UNAUTHORIZED, "Unrecognised access token") await self.store.delete_access_token(access_token) # see if any modules want to know about this await self.password_auth_provider.on_logged_out( - user_id=user_info.user_id, - device_id=user_info.device_id, + user_id=token.user_id, + device_id=token.device_id, access_token=access_token, ) # delete pushers associated with this access token - if user_info.token_id is not None: + if token.token_id is not None: await self.hs.get_pusherpool().remove_pushers_by_access_token( - user_info.user_id, (user_info.token_id,) + token.user_id, (token.token_id,) ) async def delete_access_tokens_for_user( diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 09a7a4b238..948f66a94d 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -30,7 +30,7 @@ from synapse.api.errors import ( from synapse.appservice import ApplicationService from synapse.module_api import NOT_SPAM from synapse.storage.databases.main.directory import RoomAliasMapping -from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id +from synapse.types import JsonDict, Requester, RoomAlias, get_domain_from_id if TYPE_CHECKING: from synapse.server import HomeServer @@ -133,7 +133,7 @@ class DirectoryHandler: else: # Server admins are not subject to the same constraints as normal # users when creating an alias (e.g. being in the room). - is_admin = await self.auth.is_server_admin(requester.user) + is_admin = await self.auth.is_server_admin(requester) if (self.require_membership and check_membership) and not is_admin: rooms_for_user = await self.store.get_rooms_for_user(user_id) @@ -197,7 +197,7 @@ class DirectoryHandler: user_id = requester.user.to_string() try: - can_delete = await self._user_can_delete_alias(room_alias, user_id) + can_delete = await self._user_can_delete_alias(room_alias, requester) except StoreError as e: if e.code == 404: raise NotFoundError("Unknown room alias") @@ -400,7 +400,9 @@ class DirectoryHandler: # either no interested services, or no service with an exclusive lock return True - async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool: + async def _user_can_delete_alias( + self, alias: RoomAlias, requester: Requester + ) -> bool: """Determine whether a user can delete an alias. One of the following must be true: @@ -413,7 +415,7 @@ class DirectoryHandler: """ creator = await self.store.get_room_alias_creator(alias.to_string()) - if creator == user_id: + if creator == requester.user.to_string(): return True # Resolve the alias to the corresponding room.
@@ -422,9 +424,7 @@ class DirectoryHandler: if not room_id: return False - return await self.auth.check_can_change_room_list( - room_id, UserID.from_string(user_id) - ) + return await self.auth.check_can_change_room_list(room_id, requester) async def edit_published_room_list( self, requester: Requester, room_id: str, visibility: str @@ -463,7 +463,7 @@ class DirectoryHandler: raise SynapseError(400, "Unknown room") can_change_room_list = await self.auth.check_can_change_room_list( - room_id, requester.user + room_id, requester ) if not can_change_room_list: raise AuthError( @@ -528,10 +528,8 @@ class DirectoryHandler: Get a list of the aliases that currently point to this room on this server """ # allow access to server admins and current members of the room - is_admin = await self.auth.is_server_admin(requester.user) + is_admin = await self.auth.is_server_admin(requester) if not is_admin: - await self.auth.check_user_in_room_or_world_readable( - room_id, requester.user.to_string() - ) + await self.auth.check_user_in_room_or_world_readable(room_id, requester) return await self.store.get_aliases_for_room(room_id) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 6484e47e5f..860c82c110 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -309,18 +309,18 @@ class InitialSyncHandler: if blocked: raise SynapseError(403, "This room has been blocked on this server") - user_id = requester.user.to_string() - ( membership, member_event_id, ) = await self.auth.check_user_in_room_or_world_readable( room_id, - user_id, + requester, allow_departed_users=True, ) is_peeking = member_event_id is None + user_id = requester.user.to_string() + if membership == Membership.JOIN: result = await self._room_initial_sync_joined( user_id, room_id, pagin_config, membership, is_peeking diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8f29ee9a87..acd3de06f6 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -104,7 +104,7 @@ class MessageHandler: async def get_room_data( self, - user_id: str, + requester: Requester, room_id: str, event_type: str, state_key: str, @@ -112,7 +112,7 @@ class MessageHandler: """Get data from a room. Args: - user_id + requester: The user who did the request. room_id event_type state_key @@ -125,7 +125,7 @@ class MessageHandler: membership, membership_event_id, ) = await self.auth.check_user_in_room_or_world_readable( - room_id, user_id, allow_departed_users=True + room_id, requester, allow_departed_users=True ) if membership == Membership.JOIN: @@ -161,11 +161,10 @@ class MessageHandler: async def get_state_events( self, - user_id: str, + requester: Requester, room_id: str, state_filter: Optional[StateFilter] = None, at_token: Optional[StreamToken] = None, - is_guest: bool = False, ) -> List[dict]: """Retrieve all state events for a given room. If the user is joined to the room then return the current state. If the user has @@ -174,14 +173,13 @@ class MessageHandler: visible. Args: - user_id: The user requesting state events. + requester: The user requesting state events. room_id: The room ID to get all state events from. state_filter: The state filter used to fetch state from the database. at_token: the stream token of the at which we are requesting the stats. If the user is not allowed to view the state as of that stream token, we raise a 403 SynapseError. If None, returns the current state based on the current_state_events table. 
- is_guest: whether this user is a guest Returns: A list of dicts representing state events. [{}, {}, {}] Raises: @@ -191,6 +189,7 @@ class MessageHandler: members of this room. """ state_filter = state_filter or StateFilter.all() + user_id = requester.user.to_string() if at_token: last_event_id = ( @@ -223,7 +222,7 @@ class MessageHandler: membership, membership_event_id, ) = await self.auth.check_user_in_room_or_world_readable( - room_id, user_id, allow_departed_users=True + room_id, requester, allow_departed_users=True ) if membership == Membership.JOIN: @@ -317,12 +316,11 @@ class MessageHandler: Returns: A dict of user_id to profile info """ - user_id = requester.user.to_string() if not requester.app_service: # We check AS auth after fetching the room membership, as it # requires us to pull out all joined members anyway. membership, _ = await self.auth.check_user_in_room_or_world_readable( - room_id, user_id, allow_departed_users=True + room_id, requester, allow_departed_users=True ) if membership != Membership.JOIN: raise SynapseError( @@ -340,7 +338,10 @@ class MessageHandler: # If this is an AS, double check that they are allowed to see the members. # This can either be because the AS user is in the room or because there # is a user in the room that the AS is "interested in" - if requester.app_service and user_id not in users_with_profile: + if ( + requester.app_service + and requester.user.to_string() not in users_with_profile + ): for uid in users_with_profile: if requester.app_service.is_interested_in_user(uid): break diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index e1e34e3b16..74e944bce7 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -464,7 +464,7 @@ class PaginationHandler: membership, member_event_id, ) = await self.auth.check_user_in_room_or_world_readable( - room_id, user_id, allow_departed_users=True + room_id, requester, allow_departed_users=True ) if pagin_config.direction == "b": diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c77d181722..20ec22105a 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -29,7 +29,13 @@ from synapse.api.constants import ( JoinRules, LoginType, ) -from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + ConsentNotGivenError, + InvalidClientTokenError, + SynapseError, +) from synapse.appservice import ApplicationService from synapse.config.server import is_threepid_reserved from synapse.http.servlet import assert_params_in_dict @@ -180,10 +186,7 @@ class RegistrationHandler: ) if guest_access_token: user_data = await self.auth.get_user_by_access_token(guest_access_token) - if ( - not user_data.is_guest - or UserID.from_string(user_data.user_id).localpart != localpart - ): + if not user_data.is_guest or user_data.user.localpart != localpart: raise AuthError( 403, "Cannot register taken user ID without valid guest " @@ -618,7 +621,7 @@ class RegistrationHandler: user_id = user.to_string() service = self.store.get_app_service_by_token(as_token) if not service: - raise AuthError(403, "Invalid application service token.") + raise InvalidClientTokenError() if not service.is_interested_in_user(user_id): raise SynapseError( 400, diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 72d25df8c8..28d7093f08 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -103,7 +103,7 @@ class 
RelationsHandler: # TODO Properly handle a user leaving a room. (_, member_event_id) = await self._auth.check_user_in_room_or_world_readable( - room_id, user_id, allow_departed_users=True + room_id, requester, allow_departed_users=True ) # This gets the original event and checks that a) the event exists and diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 55395457c3..2bf0ebd025 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -721,7 +721,7 @@ class RoomCreationHandler: # allow the server notices mxid to create rooms is_requester_admin = True else: - is_requester_admin = await self.auth.is_server_admin(requester.user) + is_requester_admin = await self.auth.is_server_admin(requester) # Let the third party rules modify the room creation config if needed, or abort # the room creation entirely with an exception. @@ -1279,7 +1279,7 @@ class RoomContextHandler: """ user = requester.user if use_admin_priviledge: - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) before_limit = math.floor(limit / 2.0) after_limit = limit - before_limit diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 70dc69c809..d1909665d6 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -179,7 +179,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): """Try and join a room that this server is not in Args: - requester + requester: The user making the request, according to the access token. remote_room_hosts: List of servers that can be used to join via. room_id: Room that we are trying to join user: User who is trying to join @@ -744,7 +744,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): is_requester_admin = True else: - is_requester_admin = await self.auth.is_server_admin(requester.user) + is_requester_admin = await self.auth.is_server_admin(requester) if not is_requester_admin: if self.config.server.block_non_admin_invites: @@ -868,7 +868,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): bypass_spam_checker = True else: - bypass_spam_checker = await self.auth.is_server_admin(requester.user) + bypass_spam_checker = await self.auth.is_server_admin(requester) inviter = await self._get_inviter(target.to_string(), room_id) if ( @@ -1410,7 +1410,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ShadowBanError if the requester has been shadow-banned. 
""" if self.config.server.block_non_admin_invites: - is_requester_admin = await self.auth.is_server_admin(requester.user) + is_requester_admin = await self.auth.is_server_admin(requester) if not is_requester_admin: raise SynapseError( 403, "Invites have been disabled on this server", Codes.FORBIDDEN @@ -1693,7 +1693,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): check_complexity and self.hs.config.server.limit_remote_rooms.admins_can_join ): - check_complexity = not await self.auth.is_server_admin(user) + check_complexity = not await self.store.is_server_admin(user) if check_complexity: # Fetch the room complexity diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 27aa0d3126..bcac3372a2 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -253,12 +253,11 @@ class TypingWriterHandler(FollowerTypingHandler): self, target_user: UserID, requester: Requester, room_id: str, timeout: int ) -> None: target_user_id = target_user.to_string() - auth_user_id = requester.user.to_string() if not self.is_mine_id(target_user_id): raise SynapseError(400, "User is not hosted on this homeserver") - if target_user_id != auth_user_id: + if target_user != requester.user: raise AuthError(400, "Cannot set another user's typing state") if requester.shadow_banned: @@ -266,7 +265,7 @@ class TypingWriterHandler(FollowerTypingHandler): await self.clock.sleep(random.randint(1, 10)) raise ShadowBanError() - await self.auth.check_user_in_room(room_id, target_user_id) + await self.auth.check_user_in_room(room_id, requester) logger.debug("%s has started typing in %s", target_user_id, room_id) @@ -289,12 +288,11 @@ class TypingWriterHandler(FollowerTypingHandler): self, target_user: UserID, requester: Requester, room_id: str ) -> None: target_user_id = target_user.to_string() - auth_user_id = requester.user.to_string() if not self.is_mine_id(target_user_id): raise SynapseError(400, "User is not hosted on this homeserver") - if target_user_id != auth_user_id: + if target_user != requester.user: raise AuthError(400, "Cannot set another user's typing state") if requester.shadow_banned: @@ -302,7 +300,7 @@ class TypingWriterHandler(FollowerTypingHandler): await self.clock.sleep(random.randint(1, 10)) raise ShadowBanError() - await self.auth.check_user_in_room(room_id, target_user_id) + await self.auth.check_user_in_room(room_id, requester) logger.debug("%s has stopped typing in %s", target_user_id, room_id) diff --git a/synapse/http/site.py b/synapse/http/site.py index eeec74b78a..1155f3f610 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -226,7 +226,7 @@ class SynapseRequest(Request): # If this is a request where the target user doesn't match the user who # authenticated (e.g. and admin is puppetting a user) then we return both. 
- if self._requester.user.to_string() != authenticated_entity: + if requester != authenticated_entity: return requester, authenticated_entity return requester, None diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index 399b205aaf..b467a61dfb 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -19,7 +19,7 @@ from typing import Iterable, Pattern from synapse.api.auth import Auth from synapse.api.errors import AuthError from synapse.http.site import SynapseRequest -from synapse.types import UserID +from synapse.types import Requester def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]: @@ -48,19 +48,19 @@ async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None AuthError if the requester is not a server admin """ requester = await auth.get_user_by_req(request) - await assert_user_is_admin(auth, requester.user) + await assert_user_is_admin(auth, requester) -async def assert_user_is_admin(auth: Auth, user_id: UserID) -> None: +async def assert_user_is_admin(auth: Auth, requester: Requester) -> None: """Verify that the given user is an admin user Args: auth: Auth singleton - user_id: user to check + requester: The user making the request, according to the access token. Raises: AuthError if the user is not a server admin """ - is_admin = await auth.is_server_admin(user_id) + is_admin = await auth.is_server_admin(requester) if not is_admin: raise AuthError(HTTPStatus.FORBIDDEN, "You are not a server admin") diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 19d4a008e8..73470f09ae 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -54,7 +54,7 @@ class QuarantineMediaInRoom(RestServlet): self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) logging.info("Quarantining room: %s", room_id) @@ -81,7 +81,7 @@ class QuarantineMediaByUser(RestServlet): self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) logging.info("Quarantining media by user: %s", user_id) @@ -110,7 +110,7 @@ class QuarantineMediaByID(RestServlet): self, request: SynapseRequest, server_name: str, media_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) logging.info("Quarantining media by ID: %s/%s", server_name, media_id) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 68054ffc28..3d870629c4 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -75,7 +75,7 @@ class RoomRestV2Servlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) - await assert_user_is_admin(self._auth, requester.user) + await assert_user_is_admin(self._auth, requester) content = parse_json_object_from_request(request) @@ -327,7 +327,7 @@ class RoomRestServlet(RestServlet): pagination_handler: "PaginationHandler", ) -> Tuple[int, JsonDict]: requester = await auth.get_user_by_req(request) - await assert_user_is_admin(auth, requester.user) + await assert_user_is_admin(auth, requester) content = parse_json_object_from_request(request) 
@@ -461,7 +461,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet): assert request.args is not None requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) content = parse_json_object_from_request(request) @@ -551,7 +551,7 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet): self, request: SynapseRequest, room_identifier: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) content = parse_json_object_from_request(request, allow_empty_body=True) room_id, _ = await self.resolve_room_id(room_identifier) @@ -742,7 +742,7 @@ class RoomEventContextServlet(RestServlet): self, request: SynapseRequest, room_id: str, event_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=False) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) limit = parse_integer(request, "limit", default=10) @@ -834,7 +834,7 @@ class BlockRoomRestServlet(RestServlet): self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) - await assert_user_is_admin(self._auth, requester.user) + await assert_user_is_admin(self._auth, requester) content = parse_json_object_from_request(request) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index ba2f7fa6d8..78ee9b6532 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -183,7 +183,7 @@ class UserRestServletV2(RestServlet): self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) @@ -575,10 +575,9 @@ class WhoisRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) - auth_user = requester.user - if target_user != auth_user: - await assert_user_is_admin(self.auth, auth_user) + if target_user != requester.user: + await assert_user_is_admin(self.auth, requester) if not self.is_mine(target_user): raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only whois a local user") @@ -601,7 +600,7 @@ class DeactivateAccountRestServlet(RestServlet): self, request: SynapseRequest, target_user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) if not self.is_mine(UserID.from_string(target_user_id)): raise SynapseError( @@ -693,7 +692,7 @@ class ResetPasswordRestServlet(RestServlet): This needs the user to have administrator access in Synapse.
""" requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) UserID.from_string(target_user_id) @@ -807,7 +806,7 @@ class UserAdminServlet(RestServlet): self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) auth_user = requester.user target_user = UserID.from_string(user_id) @@ -921,7 +920,7 @@ class UserTokenRestServlet(RestServlet): self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) + await assert_user_is_admin(self.auth, requester) auth_user = requester.user if not self.is_mine_id(user_id): diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py index c16d707909..e69fa0829d 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py @@ -66,7 +66,7 @@ class ProfileDisplaynameRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user = UserID.from_string(user_id) - is_admin = await self.auth.is_server_admin(requester.user) + is_admin = await self.auth.is_server_admin(requester) content = parse_json_object_from_request(request) @@ -123,7 +123,7 @@ class ProfileAvatarURLRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) - is_admin = await self.auth.is_server_admin(requester.user) + is_admin = await self.auth.is_server_admin(requester) content = parse_json_object_from_request(request) try: diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 956c45e60a..1b953d3fa0 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -484,9 +484,6 @@ class RegisterRestServlet(RestServlet): "Appservice token must be provided when using a type of m.login.application_service", ) - # Verify the AS - self.auth.get_appservice_by_req(request) - # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. 
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 13bc9482c5..0eafbae457 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -229,7 +229,7 @@ class RoomStateEventRestServlet(TransactionRestServlet): msg_handler = self.message_handler data = await msg_handler.get_room_data( - user_id=requester.user.to_string(), + requester=requester, room_id=room_id, event_type=event_type, state_key=state_key, @@ -574,7 +574,7 @@ class RoomMemberListRestServlet(RestServlet): events = await handler.get_state_events( room_id=room_id, - user_id=requester.user.to_string(), + requester=requester, at_token=at_token, state_filter=StateFilter.from_types([(EventTypes.Member, None)]), ) @@ -696,8 +696,7 @@ class RoomStateRestServlet(RestServlet): # Get all the current state for this room events = await self.message_handler.get_state_events( room_id=room_id, - user_id=requester.user.to_string(), - is_guest=requester.is_guest, + requester=requester, ) return 200, events @@ -755,7 +754,7 @@ class RoomEventServlet(RestServlet): == "true" ) if include_unredacted_content and not await self.auth.is_server_admin( - requester.user + requester ): power_level_event = ( await self._storage_controllers.state.get_current_state_event( @@ -1260,9 +1259,7 @@ class TimestampLookupRestServlet(RestServlet): self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) - await self._auth.check_user_in_room_or_world_readable( - room_id, requester.user.to_string() - ) + await self._auth.check_user_in_room_or_world_readable(room_id, requester) timestamp = parse_integer(request, "ts", required=True) direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 8ecab86ec7..70d054a8f4 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -244,7 +244,7 @@ class ServerNoticesManager: assert self.server_notices_mxid is not None notice_user_data_in_room = await self._message_handler.get_room_data( - self.server_notices_mxid, + create_requester(self.server_notices_mxid), room_id, EventTypes.Member, self.server_notices_mxid, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index cb63cd9b7d..7fb9c801da 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -69,9 +69,9 @@ class TokenLookupResult: """ user_id: str + token_id: int is_guest: bool = False shadow_banned: bool = False - token_id: Optional[int] = None device_id: Optional[str] = None valid_until_ms: Optional[int] = None token_owner: str = attr.ib() diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index dfcfaf79b6..e0f363555b 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -284,10 +284,13 @@ class AuthTestCase(unittest.HomeserverTestCase): TokenLookupResult( user_id="@baldrick:matrix.org", device_id="device", + token_id=5, token_owner="@admin:matrix.org", + token_used=True, ) ) self.store.insert_client_ip = simple_async_mock(None) + self.store.mark_access_token_as_used = simple_async_mock(None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] @@ -301,10 +304,13 @@ class AuthTestCase(unittest.HomeserverTestCase): TokenLookupResult( 
user_id="@baldrick:matrix.org", device_id="device", + token_id=5, token_owner="@admin:matrix.org", + token_used=True, ) ) self.store.insert_client_ip = simple_async_mock(None) + self.store.mark_access_token_as_used = simple_async_mock(None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] @@ -347,7 +353,7 @@ class AuthTestCase(unittest.HomeserverTestCase): serialized = macaroon.serialize() user_info = self.get_success(self.auth.get_user_by_access_token(serialized)) - self.assertEqual(user_id, user_info.user_id) + self.assertEqual(user_id, user_info.user.to_string()) self.assertTrue(user_info.is_guest) self.store.get_user_by_id.assert_called_with(user_id) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 7af1333126..8adba29d7f 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -25,7 +25,7 @@ from synapse.api.constants import EduTypes from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer from synapse.server import HomeServer -from synapse.types import JsonDict, UserID, create_requester +from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from tests import unittest @@ -117,8 +117,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.room_members = [] - async def check_user_in_room(room_id: str, user_id: str) -> None: - if user_id not in [u.to_string() for u in self.room_members]: + async def check_user_in_room(room_id: str, requester: Requester) -> None: + if requester.user.to_string() not in [ + u.to_string() for u in self.room_members + ]: raise AuthError(401, "User is not in the room") return None diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index ac9c113354..9c8c1889d3 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -20,7 +20,7 @@ from synapse.api.constants import EventTypes from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer -from synapse.types import JsonDict +from synapse.types import JsonDict, create_requester from synapse.util import Clock from synapse.visibility import filter_events_for_client @@ -188,7 +188,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): message_handler = self.hs.get_message_handler() create_event = self.get_success( message_handler.get_room_data( - self.user_id, room_id, EventTypes.Create, state_key="" + create_requester(self.user_id), room_id, EventTypes.Create, state_key="" ) ) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index d9bd8c4a28..c50f034b34 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -26,7 +26,7 @@ from synapse.rest.client import ( room_upgrade_rest_servlet, ) from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import UserID, create_requester from synapse.util import Clock from tests import unittest @@ -275,7 +275,7 @@ class ProfileTestCase(_ShadowBannedBase): message_handler = self.hs.get_message_handler() event = self.get_success( message_handler.get_room_data( - self.banned_user_id, + create_requester(self.banned_user_id), room_id, "m.room.member", self.banned_user_id, @@ -310,7 +310,7 @@ class ProfileTestCase(_ShadowBannedBase): message_handler = self.hs.get_message_handler() 
event = self.get_success( message_handler.get_room_data( - self.banned_user_id, + create_requester(self.banned_user_id), room_id, "m.room.member", self.banned_user_id, -- cgit 1.4.1 From 9385c41ba4fd9cbc86d074ff8fa69e2ae437eb88 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 23 Aug 2022 02:47:30 -0500 Subject: Fix Prometheus metrics being negative (mixed up start/end) (#13584) Fix: - https://github.com/matrix-org/synapse/pull/13535#discussion_r949582508 - https://github.com/matrix-org/synapse/pull/13533#discussion_r949577244 --- changelog.d/13584.misc | 1 + synapse/handlers/federation.py | 7 ++++++- synapse/handlers/federation_event.py | 10 ++++++++++ synapse/rest/client/room.py | 6 +++++- 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13584.misc diff --git a/changelog.d/13584.misc b/changelog.d/13584.misc new file mode 100644 index 0000000000..6b190181c8 --- /dev/null +++ b/changelog.d/13584.misc @@ -0,0 +1 @@ +Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a09eaa4379..e151962055 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -86,9 +86,14 @@ backfill_processing_before_timer = Histogram( "sec", [], buckets=( + 0.1, + 0.5, 1.0, + 2.5, 5.0, + 7.5, 10.0, + 15.0, 20.0, 30.0, 40.0, @@ -482,7 +487,7 @@ class FederationHandler: processing_end_time = self.clock.time_msec() backfill_processing_before_timer.observe( - (processing_start_time - processing_end_time) / 1000 + (processing_end_time - processing_start_time) / 1000 ) success = await try_backfill(likely_domains) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 32326975a1..048c4111f6 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -104,15 +104,25 @@ backfill_processing_after_timer = Histogram( "sec", [], buckets=( + 0.1, + 0.25, + 0.5, 1.0, + 2.5, 5.0, + 7.5, 10.0, + 15.0, 20.0, + 25.0, 30.0, 40.0, + 50.0, 60.0, 80.0, + 100.0, 120.0, + 150.0, 180.0, "+Inf", ), diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 0eafbae457..3259de4802 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -116,9 +116,13 @@ messsages_response_timer = Histogram( 2.5, 5.0, 10.0, + 20.0, 30.0, 60.0, + 80.0, + 100.0, 120.0, + 150.0, 180.0, "+Inf", ), @@ -674,7 +678,7 @@ class RoomMessageListRestServlet(RestServlet): room_member_count = await make_deferred_yieldable(room_member_count_deferred) messsages_response_timer.labels( room_size=_RoomSize.from_member_count(room_member_count) - ).observe((processing_start_time - processing_end_time) / 1000) + ).observe((processing_end_time - processing_start_time) / 1000) return 200, msgs -- cgit 1.4.1 From 37f329c9adf6ed02df15661850f999edd9e5fd93 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 23 Aug 2022 10:48:35 +0200 Subject: Fix that sending server notices fails if avatar is `None` (#13566) Introduced in #11846.
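The failure mode is easy to see in isolation: a key can be present in a dict while its value is `None`, so a bare `in` check lets a `None` avatar through to the size/mime-type validation. A small illustrative snippet (not part of the patch):

    # Illustration only: why checking `"avatar_url" in content` is insufficient.
    content = {"membership": "invite", "avatar_url": None}

    assert "avatar_url" in content            # the key exists...
    assert content.get("avatar_url") is None  # ...but there is no avatar to check

    # The one-line fix in room_member.py below guards on both conditions:
    if "avatar_url" in content and content.get("avatar_url") is not None:
        # only now is it safe to validate the avatar's size and mime type
        ...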
--- changelog.d/13566.bugfix | 1 + synapse/handlers/room_member.py | 2 +- tests/rest/admin/test_server_notice.py | 56 ++++++++++++++++++++++ .../test_resource_limits_server_notices.py | 9 ++-- 4 files changed, 64 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13566.bugfix diff --git a/changelog.d/13566.bugfix b/changelog.d/13566.bugfix new file mode 100644 index 0000000000..6c44024add --- /dev/null +++ b/changelog.d/13566.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. \ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d1909665d6..65b9a655d4 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -689,7 +689,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): errcode=Codes.BAD_JSON, ) - if "avatar_url" in content: + if "avatar_url" in content and content.get("avatar_url") is not None: if not await self.profile_handler.check_avatar_size_and_mime_type( content["avatar_url"], ): diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index 81e125e27d..a2f347f666 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -159,6 +159,62 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) self.assertEqual("'msgtype' not in content", channel.json_body["error"]) + @override_config( + { + "server_notices": { + "system_mxid_localpart": "notices", + "system_mxid_avatar_url": "somthingwrong", + }, + "max_avatar_size": "10M", + } + ) + def test_invalid_avatar_url(self) -> None: + """If the avatar URL in homeserver.yaml is invalid and + "check avatar size and mime type" is set, an error is returned. + TODO: Should be checked when reading the configuration.""" + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={ + "user_id": self.other_user, + "content": {"msgtype": "m.text", "body": "test msg"}, + }, + ) + + self.assertEqual(500, channel.code, msg=channel.json_body) + self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + + @override_config( + { + "server_notices": { + "system_mxid_localpart": "notices", + "system_mxid_display_name": "test display name", + "system_mxid_avatar_url": None, + }, + "max_avatar_size": "10M", + } + ) + def test_displayname_is_set_avatar_is_none(self) -> None: + """ + Tests that sending a server notice succeeds + if a display_name is set, avatar_url is `None` and + "check avatar size and mime type" is set.
+ """ + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={ + "user_id": self.other_user, + "content": {"msgtype": "m.text", "body": "test msg"}, + }, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + + # user has one invite + self._check_invite_and_join_status(self.other_user, 1, 0) + def test_server_notice_disabled(self) -> None: """Tests that server returns error if server notice is disabled""" channel = self.make_request( diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index e07ae78fc4..bf403045e9 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -11,16 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes, LimitBlockingTypes, ServerNoticeMsgType from synapse.api.errors import ResourceLimitError from synapse.rest import admin from synapse.rest.client import login, room, sync +from synapse.server import HomeServer from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -52,7 +55,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): return config - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.server_notices_sender = self.hs.get_server_notices_sender() # relying on [1] is far from ideal, but the only case where @@ -251,7 +254,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): c["admin_contact"] = "mailto:user@test.com" return c - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = self.hs.get_datastores().main self.server_notices_sender = self.hs.get_server_notices_sender() self.server_notices_manager = self.hs.get_server_notices_manager() -- cgit 1.4.1 From f0b23927fce6c531019c2ad6097613fd8568ecde Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 09:49:51 +0100 Subject: 1.66.0rc1 --- CHANGES.md | 61 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/13024.misc | 1 - changelog.d/13188.feature | 1 - changelog.d/13453.misc | 1 - changelog.d/13459.misc | 1 - changelog.d/13471.misc | 1 - changelog.d/13472.doc | 1 - changelog.d/13474.misc | 1 - changelog.d/13477.misc | 1 - changelog.d/13479.misc | 1 - changelog.d/13485.misc | 1 - changelog.d/13488.misc | 1 - changelog.d/13489.misc | 1 - changelog.d/13491.doc | 1 - changelog.d/13492.doc | 1 - changelog.d/13493.misc | 1 - changelog.d/13497.doc | 2 -- changelog.d/13499.misc | 1 - changelog.d/13502.misc | 1 - changelog.d/13503.feature | 1 - changelog.d/13514.bugfix | 1 - changelog.d/13515.doc | 1 - changelog.d/13522.misc | 1 - changelog.d/13525.bugfix | 1 - changelog.d/13531.misc | 1 - changelog.d/13533.misc | 1 - changelog.d/13534.misc | 1 - changelog.d/13535.misc | 1 - changelog.d/13536.doc | 1 - changelog.d/13537.bugfix | 1 - changelog.d/13538.doc | 1 - changelog.d/13541.misc | 1 - changelog.d/13543.misc | 1 - changelog.d/13544.misc | 1 - 
changelog.d/13545.misc | 1 - changelog.d/13547.misc | 1 - changelog.d/13549.feature | 1 - changelog.d/13549.misc | 1 - changelog.d/13551.feature | 1 - changelog.d/13554.misc | 1 - changelog.d/13558.misc | 1 - changelog.d/13563.feature | 1 - changelog.d/13566.bugfix | 1 - changelog.d/13574.bugfix | 1 - changelog.d/13584.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 47 files changed, 68 insertions(+), 46 deletions(-) delete mode 100644 changelog.d/13024.misc delete mode 100644 changelog.d/13188.feature delete mode 100644 changelog.d/13453.misc delete mode 100644 changelog.d/13459.misc delete mode 100644 changelog.d/13471.misc delete mode 100644 changelog.d/13472.doc delete mode 100644 changelog.d/13474.misc delete mode 100644 changelog.d/13477.misc delete mode 100644 changelog.d/13479.misc delete mode 100644 changelog.d/13485.misc delete mode 100644 changelog.d/13488.misc delete mode 100644 changelog.d/13489.misc delete mode 100644 changelog.d/13491.doc delete mode 100644 changelog.d/13492.doc delete mode 100644 changelog.d/13493.misc delete mode 100644 changelog.d/13497.doc delete mode 100644 changelog.d/13499.misc delete mode 100644 changelog.d/13502.misc delete mode 100644 changelog.d/13503.feature delete mode 100644 changelog.d/13514.bugfix delete mode 100644 changelog.d/13515.doc delete mode 100644 changelog.d/13522.misc delete mode 100644 changelog.d/13525.bugfix delete mode 100644 changelog.d/13531.misc delete mode 100644 changelog.d/13533.misc delete mode 100644 changelog.d/13534.misc delete mode 100644 changelog.d/13535.misc delete mode 100644 changelog.d/13536.doc delete mode 100644 changelog.d/13537.bugfix delete mode 100644 changelog.d/13538.doc delete mode 100644 changelog.d/13541.misc delete mode 100644 changelog.d/13543.misc delete mode 100644 changelog.d/13544.misc delete mode 100644 changelog.d/13545.misc delete mode 100644 changelog.d/13547.misc delete mode 100644 changelog.d/13549.feature delete mode 100644 changelog.d/13549.misc delete mode 100644 changelog.d/13551.feature delete mode 100644 changelog.d/13554.misc delete mode 100644 changelog.d/13558.misc delete mode 100644 changelog.d/13563.feature delete mode 100644 changelog.d/13566.bugfix delete mode 100644 changelog.d/13574.bugfix delete mode 100644 changelog.d/13584.misc diff --git a/CHANGES.md b/CHANGES.md index 8a023b769f..6eecb56837 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +Synapse 1.66.0rc1 (2022-08-23) +============================== + +Features +-------- + +- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563)) +- Add forgotten status to Room Details API. ([\#13503](https://github.com/matrix-org/synapse/issues/13503)) +- Add an experimental implementation for [MSC3852](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). 
([\#13549](https://github.com/matrix-org/synapse/issues/13549)) +- Add `org.matrix.msc2716v4` experimental room version with updated content fields. ([\#13551](https://github.com/matrix-org/synapse/issues/13551)) + + +Bugfixes +-------- + +- Faster room joins: make `/joined_members` block whilst the room is partial stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514)) +- Fix a bug in the `/event_reports` Admin API which meant that the total count could be larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525)) +- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537)) +- Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566)) +- Fix the `opentracing.force_tracing_for_users` config option not applying to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574)) + + +Improved Documentation +---------------------- + +- Add `openssl` example for generating registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472)) +- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491)) +- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492)) +- Add a warning to retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497)) +- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515)) +- Add missing links in `user_consent` section of configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536)) +- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538)) + + +Internal Changes +---------------- + +- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024)) +- Allow use of both `@trace` and `@tag_args` stacked on the same function (tracing). ([\#13453](https://github.com/matrix-org/synapse/issues/13453)) +- Faster joins: update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459)) +- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471)) +- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474)) +- Faster room joins: Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477)) +- Use literals in place of `HTTPStatus` constants in tests. 
([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488)) +- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485)) +- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489)) +- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493)) +- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554)) +- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502)) +- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547)) +- Faster room joins: Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531)) +- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533)) +- Add metrics to track how the rate limiter is affecting requests (sleep/reject). ([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541)) +- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584)) +- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543)) +- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544)) +- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545)) +- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549)) +- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558)) + + Synapse 1.65.0 (2022-08-16) =========================== diff --git a/changelog.d/13024.misc b/changelog.d/13024.misc deleted file mode 100644 index aa43c82429..0000000000 --- a/changelog.d/13024.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. 
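(A rough sketch of the `Requester` refactor noted in #13024 above, assuming a simplified stand-in class: Synapse's real `Requester` in `synapse.types` carries more fields, such as the access token and appservice state. The point of the pattern is that auth methods receive the whole request context rather than a bare user-ID string.)

```python
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class Requester:
    # Illustrative only; not Synapse's actual definition.
    user_id: str
    device_id: Optional[str] = None
    is_guest: bool = False


def check_user_in_room(requester: Requester, room_id: str) -> None:
    # With the full requester in hand, an auth check can consult guest
    # status or the device without re-deriving state from a user ID.
    if requester.is_guest:
        raise PermissionError(f"guest {requester.user_id} may not join {room_id}")


check_user_in_room(Requester(user_id="@alice:example.com"), "!room:example.com")
```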
diff --git a/changelog.d/13188.feature b/changelog.d/13188.feature deleted file mode 100644 index 4c39b74289..0000000000 --- a/changelog.d/13188.feature +++ /dev/null @@ -1 +0,0 @@ -Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). diff --git a/changelog.d/13453.misc b/changelog.d/13453.misc deleted file mode 100644 index d30c5230c8..0000000000 --- a/changelog.d/13453.misc +++ /dev/null @@ -1 +0,0 @@ -Allow use of both `@trace` and `@tag_args` stacked on the same function (tracing). diff --git a/changelog.d/13459.misc b/changelog.d/13459.misc deleted file mode 100644 index e6082210a0..0000000000 --- a/changelog.d/13459.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: update the rejected state of events during de-partial-stating. diff --git a/changelog.d/13471.misc b/changelog.d/13471.misc deleted file mode 100644 index b55ff32c76..0000000000 --- a/changelog.d/13471.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up tests for notifications. diff --git a/changelog.d/13472.doc b/changelog.d/13472.doc deleted file mode 100644 index 2ff6317300..0000000000 --- a/changelog.d/13472.doc +++ /dev/null @@ -1 +0,0 @@ -Add `openssl` example for generating registration HMAC digest. diff --git a/changelog.d/13474.misc b/changelog.d/13474.misc deleted file mode 100644 index d34c661fed..0000000000 --- a/changelog.d/13474.misc +++ /dev/null @@ -1 +0,0 @@ -Add some miscellaneous comments to document sync, especially around `compute_state_delta`. diff --git a/changelog.d/13477.misc b/changelog.d/13477.misc deleted file mode 100644 index 5d21ae9d7a..0000000000 --- a/changelog.d/13477.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. diff --git a/changelog.d/13479.misc b/changelog.d/13479.misc deleted file mode 100644 index 315930deab..0000000000 --- a/changelog.d/13479.misc +++ /dev/null @@ -1 +0,0 @@ -Use literals in place of `HTTPStatus` constants in tests. \ No newline at end of file diff --git a/changelog.d/13485.misc b/changelog.d/13485.misc deleted file mode 100644 index c75712b9ff..0000000000 --- a/changelog.d/13485.misc +++ /dev/null @@ -1 +0,0 @@ -Add comments about how event push actions are rotated. diff --git a/changelog.d/13488.misc b/changelog.d/13488.misc deleted file mode 100644 index 315930deab..0000000000 --- a/changelog.d/13488.misc +++ /dev/null @@ -1 +0,0 @@ -Use literals in place of `HTTPStatus` constants in tests. \ No newline at end of file diff --git a/changelog.d/13489.misc b/changelog.d/13489.misc deleted file mode 100644 index 5e4853860e..0000000000 --- a/changelog.d/13489.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. diff --git a/changelog.d/13491.doc b/changelog.d/13491.doc deleted file mode 100644 index 026f735549..0000000000 --- a/changelog.d/13491.doc +++ /dev/null @@ -1 +0,0 @@ -Tidy up Synapse's README. 
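(For reference alongside the `openssl` example added by #13472 above: the same registration HMAC digest can be computed in Python. The message layout, NUL-separated nonce, user, password and admin flag under HMAC-SHA1 keyed with `registration_shared_secret`, follows Synapse's shared-secret registration docs; the values below are placeholders.)

```python
import hashlib
import hmac


def registration_mac(
    shared_secret: str, nonce: str, user: str, password: str, admin: bool = False
) -> str:
    # HMAC-SHA1 over nonce, user, password and the admin flag, NUL-separated,
    # keyed with the homeserver's registration_shared_secret.
    mac = hmac.new(shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    for part in (nonce, user, password):
        mac.update(part.encode("utf8"))
        mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()


# Placeholder values; a real call uses the nonce returned by
# GET /_synapse/admin/v1/register.
print(registration_mac("shared_secret", "a-nonce", "alice", "hunter2"))
```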
diff --git a/changelog.d/13492.doc b/changelog.d/13492.doc deleted file mode 100644 index fc4850d556..0000000000 --- a/changelog.d/13492.doc +++ /dev/null @@ -1 +0,0 @@ -Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. diff --git a/changelog.d/13493.misc b/changelog.d/13493.misc deleted file mode 100644 index d7d5c33a89..0000000000 --- a/changelog.d/13493.misc +++ /dev/null @@ -1 +0,0 @@ -Modify HTML template content to better support mobile devices' screen sizes. \ No newline at end of file diff --git a/changelog.d/13497.doc b/changelog.d/13497.doc deleted file mode 100644 index ef6dc2308d..0000000000 --- a/changelog.d/13497.doc +++ /dev/null @@ -1,2 +0,0 @@ -Add a warning to retention documentation regarding the possibility of database corruption. - diff --git a/changelog.d/13499.misc b/changelog.d/13499.misc deleted file mode 100644 index 99dbcebec8..0000000000 --- a/changelog.d/13499.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. diff --git a/changelog.d/13502.misc b/changelog.d/13502.misc deleted file mode 100644 index ed6832996e..0000000000 --- a/changelog.d/13502.misc +++ /dev/null @@ -1 +0,0 @@ -Add a linter script which will reject non-strict types in Pydantic models. diff --git a/changelog.d/13503.feature b/changelog.d/13503.feature deleted file mode 100644 index 4baabd1e32..0000000000 --- a/changelog.d/13503.feature +++ /dev/null @@ -1 +0,0 @@ -Add forgotten status to Room Details API. \ No newline at end of file diff --git a/changelog.d/13514.bugfix b/changelog.d/13514.bugfix deleted file mode 100644 index 7498af0e47..0000000000 --- a/changelog.d/13514.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: make `/joined_members` block whilst the room is partial stated. \ No newline at end of file diff --git a/changelog.d/13515.doc b/changelog.d/13515.doc deleted file mode 100644 index a4d9d97dcb..0000000000 --- a/changelog.d/13515.doc +++ /dev/null @@ -1 +0,0 @@ -Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. \ No newline at end of file diff --git a/changelog.d/13522.misc b/changelog.d/13522.misc deleted file mode 100644 index 0a8827205d..0000000000 --- a/changelog.d/13522.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending messages in rooms with thousands of local users. diff --git a/changelog.d/13525.bugfix b/changelog.d/13525.bugfix deleted file mode 100644 index dbd1adbc88..0000000000 --- a/changelog.d/13525.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in the `/event_reports` Admin API which meant that the total count could be larger than the number of results you can actually query for. \ No newline at end of file diff --git a/changelog.d/13531.misc b/changelog.d/13531.misc deleted file mode 100644 index 986122d3d0..0000000000 --- a/changelog.d/13531.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. diff --git a/changelog.d/13533.misc b/changelog.d/13533.misc deleted file mode 100644 index ab4b18887a..0000000000 --- a/changelog.d/13533.misc +++ /dev/null @@ -1 +0,0 @@ -Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). 
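(Why the linter from #13502 above insists on strict types: under pydantic v1, which Synapse used at the time, a plain `str` field silently coerces non-string JSON values, which is the wrong behaviour for request-body validation. A minimal demonstration:)

```python
from pydantic import BaseModel, StrictStr, ValidationError


class LaxBody(BaseModel):
    user_id: str  # pydantic v1 coerces: the int 123 becomes "123"


class StrictBody(BaseModel):
    user_id: StrictStr  # non-strings are rejected outright


print(LaxBody(user_id=123).user_id)  # "123", silent coercion
try:
    StrictBody(user_id=123)
except ValidationError as exc:
    print(exc)  # str type expected
```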
diff --git a/changelog.d/13534.misc b/changelog.d/13534.misc deleted file mode 100644 index b488bf74c3..0000000000 --- a/changelog.d/13534.misc +++ /dev/null @@ -1 +0,0 @@ -Add metrics to track how the rate limiter is affecting requests (sleep/reject). diff --git a/changelog.d/13535.misc b/changelog.d/13535.misc deleted file mode 100644 index 6b190181c8..0000000000 --- a/changelog.d/13535.misc +++ /dev/null @@ -1 +0,0 @@ -Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). diff --git a/changelog.d/13536.doc b/changelog.d/13536.doc deleted file mode 100644 index c8752acb77..0000000000 --- a/changelog.d/13536.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing links in `user_consent` section of configuration manual. diff --git a/changelog.d/13537.bugfix b/changelog.d/13537.bugfix deleted file mode 100644 index db843504b1..0000000000 --- a/changelog.d/13537.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add support for compression to federation responses. diff --git a/changelog.d/13538.doc b/changelog.d/13538.doc deleted file mode 100644 index 9215aeac5a..0000000000 --- a/changelog.d/13538.doc +++ /dev/null @@ -1 +0,0 @@ -Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). \ No newline at end of file diff --git a/changelog.d/13541.misc b/changelog.d/13541.misc deleted file mode 100644 index b488bf74c3..0000000000 --- a/changelog.d/13541.misc +++ /dev/null @@ -1 +0,0 @@ -Add metrics to track how the rate limiter is affecting requests (sleep/reject). diff --git a/changelog.d/13543.misc b/changelog.d/13543.misc deleted file mode 100644 index 0300f46cd8..0000000000 --- a/changelog.d/13543.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the number of tests using legacy TCP replication. diff --git a/changelog.d/13544.misc b/changelog.d/13544.misc deleted file mode 100644 index d84ba3f076..0000000000 --- a/changelog.d/13544.misc +++ /dev/null @@ -1 +0,0 @@ -Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). diff --git a/changelog.d/13545.misc b/changelog.d/13545.misc deleted file mode 100644 index 1cdbef179e..0000000000 --- a/changelog.d/13545.misc +++ /dev/null @@ -1 +0,0 @@ -Update metrics to track `/messages` response time by room size. diff --git a/changelog.d/13547.misc b/changelog.d/13547.misc deleted file mode 100644 index 0a8827205d..0000000000 --- a/changelog.d/13547.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending messages in rooms with thousands of local users. diff --git a/changelog.d/13549.feature b/changelog.d/13549.feature deleted file mode 100644 index b6a726789c..0000000000 --- a/changelog.d/13549.feature +++ /dev/null @@ -1 +0,0 @@ -Add an experimental implementation for [MSC3852](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). \ No newline at end of file diff --git a/changelog.d/13549.misc b/changelog.d/13549.misc deleted file mode 100644 index 5b4303e87e..0000000000 --- a/changelog.d/13549.misc +++ /dev/null @@ -1 +0,0 @@ -Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. 
\ No newline at end of file diff --git a/changelog.d/13551.feature b/changelog.d/13551.feature deleted file mode 100644 index 365673a3c1..0000000000 --- a/changelog.d/13551.feature +++ /dev/null @@ -1 +0,0 @@ -Add `org.matrix.msc2716v4` experimental room version with updated content fields. diff --git a/changelog.d/13554.misc b/changelog.d/13554.misc deleted file mode 100644 index 99dbcebec8..0000000000 --- a/changelog.d/13554.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. diff --git a/changelog.d/13558.misc b/changelog.d/13558.misc deleted file mode 100644 index e3f3e31740..0000000000 --- a/changelog.d/13558.misc +++ /dev/null @@ -1 +0,0 @@ -Make `HomeServerTestCase` load any configured homeserver modules automatically. \ No newline at end of file diff --git a/changelog.d/13563.feature b/changelog.d/13563.feature deleted file mode 100644 index 4c39b74289..0000000000 --- a/changelog.d/13563.feature +++ /dev/null @@ -1 +0,0 @@ -Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). diff --git a/changelog.d/13566.bugfix b/changelog.d/13566.bugfix deleted file mode 100644 index 6c44024add..0000000000 --- a/changelog.d/13566.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. \ No newline at end of file diff --git a/changelog.d/13574.bugfix b/changelog.d/13574.bugfix deleted file mode 100644 index 3899c137aa..0000000000 --- a/changelog.d/13574.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the `opentracing.force_tracing_for_users` config option not applying to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. \ No newline at end of file diff --git a/changelog.d/13584.misc b/changelog.d/13584.misc deleted file mode 100644 index 6b190181c8..0000000000 --- a/changelog.d/13584.misc +++ /dev/null @@ -1 +0,0 @@ -Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). diff --git a/debian/changelog b/debian/changelog index 917249f940..c3974261a9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium + + * New Synapse release 1.66.0rc1. + + -- Synapse Packaging team Tue, 23 Aug 2022 09:48:55 +0100 + matrix-synapse-py3 (1.65.0) stable; urgency=medium * New Synapse release 1.65.0. 
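(Rough shape of the backfill-timing metrics from #13535 / #13584 above, expressed with the raw `prometheus_client` API. Synapse registers its metrics through its own wrappers, so the bucket choices and wiring here are illustrative assumptions, not the actual implementation.)

```python
from prometheus_client import Histogram

backfill_processing_before = Histogram(
    "synapse_federation_backfill_processing_before_time_seconds",
    "Time spent preparing a backfill request, before any federation traffic",
    buckets=(0.1, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0),
)


def process_backfill() -> None:
    # The context manager observes the elapsed seconds on exit.
    with backfill_processing_before.time():
        ...  # pick backwards extremities, choose servers to ask, and so on
```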
diff --git a/pyproject.toml b/pyproject.toml index 9e59470ac8..745b6067aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.65.0" +version = "1.66.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From d6f5699737a36b8f5864020226d31a999b2ea0b5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 10:09:45 +0100 Subject: Describe changes to admin API in 1.66 Cross-ref #13525 --- docs/admin_api/rooms.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index ac7c54c20e..7526956bec 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -337,6 +337,8 @@ A response body like the following is returned: } ``` +_Changed in Synapse 1.66:_ Added the `forgotten` key to the response body. + # Room Members API The Room Members admin API allows server admins to get a list of all members of a room. -- cgit 1.4.1 From f8b9abdcdb6f5393173bd90329d46a0e2ed50043 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 10:10:20 +0100 Subject: Adjust changelog --- CHANGES.md | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6eecb56837..34c2f3ce33 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,19 +5,20 @@ Features -------- - Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563)) -- Add forgotten status to Room Details API. ([\#13503](https://github.com/matrix-org/synapse/issues/13503)) -- Add an experimental implementation for [MSC3852](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549)) -- Add `org.matrix.msc2716v4` experimental room version with updated content fields. ([\#13551](https://github.com/matrix-org/synapse/issues/13551)) +- Add forgotten status to [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503)) +- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549)) +- Add `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551)) +- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537)) +- Improve performance of sending messages in rooms with thousands of local users. 
([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547)) Bugfixes -------- - Faster room joins: make `/joined_members` block whilst the room is partial stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514)) -- Fix a bug in the `/event_reports` Admin API which meant that the total count could be larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525)) -- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537)) +- Fix a bug introduced in Synapse 1.21 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525)) - Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566)) -- Fix the `opentracing.force_tracing_for_users` config option not applying to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574)) +- Fix a bug where the `opentracing.force_tracing_for_users` config option not applying to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574)) Improved Documentation @@ -35,26 +36,33 @@ Improved Documentation Internal Changes ---------------- -- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024)) -- Allow use of both `@trace` and `@tag_args` stacked on the same function (tracing). ([\#13453](https://github.com/matrix-org/synapse/issues/13453)) -- Faster joins: update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459)) -- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471)) -- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474)) +### Faster room joins + +- Faster room joins: update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459)) - Faster room joins: Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477)) -- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488)) -- Add comments about how event push actions are rotated. 
([\#13485](https://github.com/matrix-org/synapse/issues/13485)) +- Faster room joins: Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531)) + +### Metrics and tracing + +- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453)) - Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489)) -- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493)) - Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554)) -- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502)) -- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547)) -- Faster room joins: Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531)) - Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533)) - Add metrics to track how the rate limiter is affecting requests (sleep/reject). ([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541)) - Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584)) -- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543)) - Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544)) - Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545)) + +### Everything else + +- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024)) +- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471)) +- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474)) +- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488)) +- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485)) +- Modify HTML template content to better support mobile devices' screen sizes. 
([\#13493](https://github.com/matrix-org/synapse/issues/13493)) +- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502)) +- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543)) - Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549)) - Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558)) -- cgit 1.4.1 From 79281f517daea3769f0ea2be044e0b899f4ee101 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 10:22:47 +0100 Subject: Update changelog --- CHANGES.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 34c2f3ce33..778713f528 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -16,9 +16,9 @@ Bugfixes -------- - Faster room joins: make `/joined_members` block whilst the room is partial stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514)) -- Fix a bug introduced in Synapse 1.21 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525)) +- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525)) - Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566)) -- Fix a bug where the `opentracing.force_tracing_for_users` config option not applying to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574)) +- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574)) Improved Documentation @@ -38,9 +38,9 @@ Internal Changes ### Faster room joins -- Faster room joins: update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459)) -- Faster room joins: Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477)) -- Faster room joins: Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. 
([\#13531](https://github.com/matrix-org/synapse/issues/13531)) +- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459)) +- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477)) +- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531)) ### Metrics and tracing -- cgit 1.4.1 From 5e7847dc923142bc68834f9b9538ada3fdd887d5 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 23 Aug 2022 10:49:59 +0100 Subject: Cache user IDs instead of profile objects (#13573) The profile objects are never used and increase cache size significantly. --- changelog.d/13573.misc | 1 + synapse/handlers/sync.py | 4 +- synapse/state/__init__.py | 13 +++--- synapse/storage/databases/main/roommember.py | 67 ++++++++++++---------------- synapse/util/caches/descriptors.py | 26 ++++++++--- 5 files changed, 57 insertions(+), 54 deletions(-) create mode 100644 changelog.d/13573.misc diff --git a/changelog.d/13573.misc b/changelog.d/13573.misc new file mode 100644 index 0000000000..1ce9c0c081 --- /dev/null +++ b/changelog.d/13573.misc @@ -0,0 +1 @@ +Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b4d3f3958c..2d95b1fa24 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -2421,10 +2421,10 @@ class SyncHandler: joined_room.room_id, joined_room.event_pos.stream ) ) - users_in_room = await self.state.get_current_users_in_room( + user_ids_in_room = await self.state.get_current_user_ids_in_room( joined_room.room_id, extrems ) - if user_id in users_in_room: + if user_id in user_ids_in_room: joined_room_ids.add(joined_room.room_id) return frozenset(joined_room_ids) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index c355e4f98a..3047e1b1ad 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -44,7 +44,6 @@ from synapse.logging.context import ContextResourceUsage from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.storage.roommember import ProfileInfo from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util.async_helpers import Linearizer @@ -210,11 +209,11 @@ class StateHandler: ret = await self.resolve_state_groups_for_events(room_id, event_ids) return await ret.get_state(self._state_storage_controller, state_filter) - async def get_current_users_in_room( + async def get_current_user_ids_in_room( self, room_id: str, latest_event_ids: List[str] - ) -> Dict[str, ProfileInfo]: + ) -> Set[str]: """ - Get the users who are currently in a room. + Get the users IDs who are currently in a room. Note: This is much slower than using the equivalent method `DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`, @@ -225,15 +224,15 @@ class StateHandler: room_id: The ID of the room. latest_event_ids: Precomputed list of latest event IDs. Will be computed if None. Returns: - Dictionary of user IDs to their profileinfo. + Set of user IDs in the room. 
""" assert latest_event_ids is not None - logger.debug("calling resolve_state_groups from get_current_users_in_room") + logger.debug("calling resolve_state_groups from get_current_user_ids_in_room") entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids) state = await entry.get_state(self._state_storage_controller, StateFilter.all()) - return await self.store.get_joined_users_from_state(room_id, state, entry) + return await self.store.get_joined_user_ids_from_state(room_id, state, entry) async def get_hosts_in_room_at_events( self, room_id: str, event_ids: Collection[str] diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 827c1f1efd..0eb024a809 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -835,9 +835,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): return shared_room_ids or frozenset() - async def get_joined_users_from_state( + async def get_joined_user_ids_from_state( self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" - ) -> Dict[str, ProfileInfo]: + ) -> Set[str]: state_group: Union[object, int] = state_entry.state_group if not state_group: # If state_group is None it means it has yet to be assigned a @@ -848,25 +848,25 @@ class RoomMemberWorkerStore(EventsWorkerStore): assert state_group is not None with Measure(self._clock, "get_joined_users_from_state"): - return await self._get_joined_users_from_context( + return await self._get_joined_user_ids_from_context( room_id, state_group, state, context=state_entry ) @cached(num_args=2, iterable=True, max_entries=100000) - async def _get_joined_users_from_context( + async def _get_joined_user_ids_from_context( self, room_id: str, state_group: Union[object, int], current_state_ids: StateMap[str], event: Optional[EventBase] = None, context: Optional["_StateCacheEntry"] = None, - ) -> Dict[str, ProfileInfo]: + ) -> Set[str]: # We don't use `state_group`, it's there so that we can cache based # on it. However, it's important that it's never None, since two current_states # with a state_group of None are likely to be different. 
assert state_group is not None - users_in_room = {} + users_in_room = set() member_event_ids = [ e_id for key, e_id in current_state_ids.items() @@ -879,11 +879,11 @@ class RoomMemberWorkerStore(EventsWorkerStore): # If we do then we can reuse that result and simply update it with # any membership changes in `delta_ids` if context.prev_group and context.delta_ids: - prev_res = self._get_joined_users_from_context.cache.get_immediate( + prev_res = self._get_joined_user_ids_from_context.cache.get_immediate( (room_id, context.prev_group), None ) - if prev_res and isinstance(prev_res, dict): - users_in_room = dict(prev_res) + if prev_res and isinstance(prev_res, set): + users_in_room = prev_res member_event_ids = [ e_id for key, e_id in context.delta_ids.items() @@ -891,7 +891,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): ] for etype, state_key in context.delta_ids: if etype == EventTypes.Member: - users_in_room.pop(state_key, None) + users_in_room.discard(state_key) # We check if we have any of the member event ids in the event cache # before we ask the DB @@ -908,42 +908,41 @@ class RoomMemberWorkerStore(EventsWorkerStore): ev_entry = event_map.get(event_id) if ev_entry and not ev_entry.event.rejected_reason: if ev_entry.event.membership == Membership.JOIN: - users_in_room[ev_entry.event.state_key] = ProfileInfo( - display_name=ev_entry.event.content.get("displayname", None), - avatar_url=ev_entry.event.content.get("avatar_url", None), - ) + users_in_room.add(ev_entry.event.state_key) else: missing_member_event_ids.append(event_id) if missing_member_event_ids: - event_to_memberships = await self._get_joined_profiles_from_event_ids( + event_to_memberships = await self._get_user_ids_from_membership_event_ids( missing_member_event_ids ) - users_in_room.update(row for row in event_to_memberships.values() if row) + users_in_room.update(event_to_memberships.values()) if event is not None and event.type == EventTypes.Member: if event.membership == Membership.JOIN: if event.event_id in member_event_ids: - users_in_room[event.state_key] = ProfileInfo( - display_name=event.content.get("displayname", None), - avatar_url=event.content.get("avatar_url", None), - ) + users_in_room.add(event.state_key) return users_in_room - @cached(max_entries=10000) - def _get_joined_profile_from_event_id( + @cached( + max_entries=10000, + # This name matches the old function that has been replaced - the cache name + # is kept here to maintain backwards compatibility. + name="_get_joined_profile_from_event_id", + ) + def _get_user_id_from_membership_event_id( self, event_id: str ) -> Optional[Tuple[str, ProfileInfo]]: raise NotImplementedError() @cachedList( - cached_method_name="_get_joined_profile_from_event_id", + cached_method_name="_get_user_id_from_membership_event_id", list_name="event_ids", ) - async def _get_joined_profiles_from_event_ids( + async def _get_user_ids_from_membership_event_ids( self, event_ids: Iterable[str] - ) -> Dict[str, Optional[Tuple[str, ProfileInfo]]]: + ) -> Dict[str, str]: """For given set of member event_ids check if they point to a join event and if so return the associated user and profile info. 
@@ -958,21 +957,13 @@ class RoomMemberWorkerStore(EventsWorkerStore): table="room_memberships", column="event_id", iterable=event_ids, - retcols=("user_id", "display_name", "avatar_url", "event_id"), + retcols=("user_id", "event_id"), keyvalues={"membership": Membership.JOIN}, batch_size=1000, - desc="_get_joined_profiles_from_event_ids", + desc="_get_user_ids_from_membership_event_ids", ) - return { - row["event_id"]: ( - row["user_id"], - ProfileInfo( - avatar_url=row["avatar_url"], display_name=row["display_name"] - ), - ) - for row in rows - } + return {row["event_id"]: row["user_id"] for row in rows} @cached(max_entries=10000) async def is_host_joined(self, room_id: str, host: str) -> bool: @@ -1131,12 +1122,12 @@ class RoomMemberWorkerStore(EventsWorkerStore): else: # The cache doesn't match the state group or prev state group, # so we calculate the result from first principles. - joined_users = await self.get_joined_users_from_state( + joined_user_ids = await self.get_joined_user_ids_from_state( room_id, state, state_entry ) cache.hosts_to_joined_users = {} - for user_id in joined_users: + for user_id in joined_user_ids: host = intern_string(get_domain_from_id(user_id)) cache.hosts_to_joined_users.setdefault(host, set()).add(user_id) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 867f315b2a..9d4bc89edb 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -73,8 +73,10 @@ class _CacheDescriptorBase: num_args: Optional[int], uncached_args: Optional[Collection[str]] = None, cache_context: bool = False, + name: Optional[str] = None, ): self.orig = orig + self.name = name or orig.__name__ arg_spec = inspect.getfullargspec(orig) all_args = arg_spec.args @@ -211,7 +213,7 @@ class LruCacheDescriptor(_CacheDescriptorBase): def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: cache: LruCache[CacheKey, Any] = LruCache( - cache_name=self.orig.__name__, + cache_name=self.name, max_size=self.max_entries, ) @@ -241,7 +243,7 @@ class LruCacheDescriptor(_CacheDescriptorBase): wrapped = cast(_CachedFunction, _wrapped) wrapped.cache = cache - obj.__dict__[self.orig.__name__] = wrapped + obj.__dict__[self.name] = wrapped return wrapped @@ -301,12 +303,14 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, + name: Optional[str] = None, ): super().__init__( orig, num_args=num_args, uncached_args=uncached_args, cache_context=cache_context, + name=name, ) if tree and self.num_args < 2: @@ -321,7 +325,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: cache: DeferredCache[CacheKey, Any] = DeferredCache( - name=self.orig.__name__, + name=self.name, max_entries=self.max_entries, tree=self.tree, iterable=self.iterable, @@ -372,7 +376,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): wrapped.cache = cache wrapped.num_args = self.num_args - obj.__dict__[self.orig.__name__] = wrapped + obj.__dict__[self.name] = wrapped return wrapped @@ -393,6 +397,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): cached_method_name: str, list_name: str, num_args: Optional[int] = None, + name: Optional[str] = None, ): """ Args: @@ -403,7 +408,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): but including list_name) to use as cache keys. Defaults to all named args of the function. 
""" - super().__init__(orig, num_args=num_args, uncached_args=None) + super().__init__(orig, num_args=num_args, uncached_args=None, name=name) self.list_name = list_name @@ -525,7 +530,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): else: return defer.succeed(results) - obj.__dict__[self.orig.__name__] = wrapped + obj.__dict__[self.name] = wrapped return wrapped @@ -577,6 +582,7 @@ def cached( cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, + name: Optional[str] = None, ) -> Callable[[F], _CachedFunction[F]]: func = lambda orig: DeferredCacheDescriptor( orig, @@ -587,13 +593,18 @@ def cached( cache_context=cache_context, iterable=iterable, prune_unread_entries=prune_unread_entries, + name=name, ) return cast(Callable[[F], _CachedFunction[F]], func) def cachedList( - *, cached_method_name: str, list_name: str, num_args: Optional[int] = None + *, + cached_method_name: str, + list_name: str, + num_args: Optional[int] = None, + name: Optional[str] = None, ) -> Callable[[F], _CachedFunction[F]]: """Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`. @@ -628,6 +639,7 @@ def cachedList( cached_method_name=cached_method_name, list_name=list_name, num_args=num_args, + name=name, ) return cast(Callable[[F], _CachedFunction[F]], func) -- cgit 1.4.1 From 956e015413d3da417c1058e3e72d97b3d1bc8170 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 12:40:00 +0100 Subject: Drop support for delegating email validation, round 2 (#13596) --- CHANGES.md | 12 +++ changelog.d/13596.removal | 1 + docs/upgrade.md | 19 ++++ docs/usage/configuration/config_documentation.md | 5 +- synapse/app/homeserver.py | 3 +- synapse/config/emailconfig.py | 46 ++-------- synapse/config/registration.py | 13 +-- synapse/handlers/identity.py | 56 +----------- synapse/handlers/ui_auth/checkers.py | 21 +---- synapse/rest/client/account.py | 108 ++++++++--------------- synapse/rest/client/register.py | 59 +++++-------- synapse/rest/synapse/client/password_reset.py | 8 +- tests/rest/client/test_register.py | 2 +- 13 files changed, 108 insertions(+), 245 deletions(-) create mode 100644 changelog.d/13596.removal diff --git a/CHANGES.md b/CHANGES.md index 778713f528..14fafc260d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,12 @@ Synapse 1.66.0rc1 (2022-08-23) ============================== +This release removes the ability for homeservers to delegate email ownership +verification and password reset confirmation to identity servers. This removal +was originally planned for Synapse 1.64, but was later deferred until now. + +See the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. + Features -------- @@ -33,6 +39,12 @@ Improved Documentation - Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538)) +Deprecations and Removals +------------------------- + +- Remove the ability for homeservers to delegate email ownership verification + and password reset confirmation to identity servers. See [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. 
+ Internal Changes ---------------- diff --git a/changelog.d/13596.removal b/changelog.d/13596.removal new file mode 100644 index 0000000000..6c12ae75b4 --- /dev/null +++ b/changelog.d/13596.removal @@ -0,0 +1 @@ +Remove the ability for homeservers to delegate email ownership verification and password reset confirmation to identity servers. See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.66/docs/upgrade.md#upgrading-to-v1660) for more details. \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index 47a74b67de..0ab5bfeaf0 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,25 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.66.0 + +## Delegation of email validation no longer supported + +As of this version, Synapse no longer allows the tasks of verifying email address +ownership, and password reset confirmation, to be delegated to an identity server. +This removal was previously planned for Synapse 1.64.0, but was +[delayed](https://github.com/matrix-org/synapse/issues/13421) until now to give +homeserver administrators more notice of the change. + +To continue to allow users to add email addresses to their homeserver accounts, +and perform password resets, make sure that Synapse is configured with a working +email server in the [`email` configuration +section](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email) +(including, at a minimum, a `notif_from` setting.) + +Specifying an `email` setting under `account_threepid_delegates` will now cause +an error at startup. + # Upgrading to v1.64.0 ## Deprecation of the ability to delegate e-mail verification to identity servers diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index cc72966823..8ae018e628 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2182,7 +2182,10 @@ their account. by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest).) -*Updated in Synapse 1.64.0*: The `email` option is deprecated. +*Deprecated in Synapse 1.64.0*: The `email` option is deprecated. + +*Removed in Synapse 1.66.0*: The `email` option has been removed. +If present, Synapse will report a configuration error on startup. 
Example configuration: ```yaml diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index d98012adeb..68993d91a9 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -44,7 +44,6 @@ from synapse.app._base import ( register_start, ) from synapse.config._base import ConfigError, format_config_error -from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig from synapse.federation.transport.server import TransportLayerServer @@ -202,7 +201,7 @@ class SynapseHomeServer(HomeServer): } ) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: from synapse.rest.synapse.client.password_reset import ( PasswordResetSubmitTokenResource, ) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 66a6dbf1fe..a3af35b7c4 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -18,7 +18,6 @@ import email.utils import logging import os -from enum import Enum from typing import Any import attr @@ -136,40 +135,22 @@ class EmailConfig(Config): self.email_enable_notifs = email_config.get("enable_notifs", False) - self.threepid_behaviour_email = ( - # Have Synapse handle the email sending if account_threepid_delegates.email - # is not defined - # msisdn is currently always remote while Synapse does not support any method of - # sending SMS messages - ThreepidBehaviour.REMOTE - if self.root.registration.account_threepid_delegate_email - else ThreepidBehaviour.LOCAL - ) - if config.get("trust_identity_server_for_password_resets"): raise ConfigError( - 'The config option "trust_identity_server_for_password_resets" has been removed.' - "Please consult the configuration manual at docs/usage/configuration/config_documentation.md for " - "details and update your config file." + 'The config option "trust_identity_server_for_password_resets" ' + "is no longer supported. Please remove it from the config file." ) - self.local_threepid_handling_disabled_due_to_email_config = False - if ( - self.threepid_behaviour_email == ThreepidBehaviour.LOCAL - and email_config == {} - ): - # We cannot warn the user this has happened here - # Instead do so when a user attempts to reset their password - self.local_threepid_handling_disabled_due_to_email_config = True - - self.threepid_behaviour_email = ThreepidBehaviour.OFF + # If we have email config settings, assume that we can verify ownership of + # email addresses. 
+ self.can_verify_email = email_config != {} # Get lifetime of a validation token in milliseconds self.email_validation_token_lifetime = self.parse_duration( email_config.get("validation_token_lifetime", "1h") ) - if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.can_verify_email: missing = [] if not self.email_notif_from: missing.append("email.notif_from") @@ -360,18 +341,3 @@ class EmailConfig(Config): "Config option email.invite_client_location must be a http or https URL", path=("email", "invite_client_location"), ) - - -class ThreepidBehaviour(Enum): - """ - Enum to define the behaviour of Synapse with regards to when it contacts an identity - server for 3pid registration and password resets - - REMOTE = use an external server to send tokens - LOCAL = send tokens ourselves - OFF = disable registration via 3pid and password resets - """ - - REMOTE = "remote" - LOCAL = "local" - OFF = "off" diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 01fb0331bc..a888d976f2 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse -import logging from typing import Any, Optional from synapse.api.constants import RoomCreationPreset @@ -21,15 +20,11 @@ from synapse.config._base import Config, ConfigError from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool -logger = logging.getLogger(__name__) - -LEGACY_EMAIL_DELEGATE_WARNING = """\ -Delegation of email verification to an identity server is now deprecated. To +NO_EMAIL_DELEGATE_ERROR = """\ +Delegation of email verification to an identity server is no longer supported. To continue to allow users to add email addresses to their accounts, and use them for password resets, configure Synapse with an SMTP server via the `email` setting, and remove `account_threepid_delegates.email`. - -This will be an error in a future version. """ @@ -64,9 +59,7 @@ class RegistrationConfig(Config): account_threepid_delegates = config.get("account_threepid_delegates") or {} if "email" in account_threepid_delegates: - logger.warning(LEGACY_EMAIL_DELEGATE_WARNING) - - self.account_threepid_delegate_email = account_threepid_delegates.get("email") + raise ConfigError(NO_EMAIL_DELEGATE_ERROR) self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index e5afe84df9..9571d461c8 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -26,7 +26,6 @@ from synapse.api.errors import ( SynapseError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.config.emailconfig import ThreepidBehaviour from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest @@ -416,48 +415,6 @@ class IdentityHandler: return session_id - async def request_email_token( - self, - id_server: str, - email: str, - client_secret: str, - send_attempt: int, - next_link: Optional[str] = None, - ) -> JsonDict: - """ - Request an external server send an email on our behalf for the purposes of threepid - validation. 
- - Args: - id_server: The identity server to proxy to - email: The email to send the message to - client_secret: The unique client_secret sends by the user - send_attempt: Which attempt this is - next_link: A link to redirect the user to once they submit the token - - Returns: - The json response body from the server - """ - params = { - "email": email, - "client_secret": client_secret, - "send_attempt": send_attempt, - } - if next_link: - params["next_link"] = next_link - - try: - data = await self.http_client.post_json_get_json( - id_server + "/_matrix/identity/api/v1/validate/email/requestToken", - params, - ) - return data - except HttpResponseException as e: - logger.info("Proxied requestToken failed: %r", e) - raise e.to_synapse_error() - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - async def requestMsisdnToken( self, id_server: str, @@ -531,18 +488,7 @@ class IdentityHandler: validation_session = None # Try to validate as email - if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - # Remote emails will only be used if a valid identity server is provided. - assert ( - self.hs.config.registration.account_threepid_delegate_email is not None - ) - - # Ask our delegated email identity server - validation_session = await self.threepid_from_creds( - self.hs.config.registration.account_threepid_delegate_email, - threepid_creds, - ) - elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.hs.config.email.can_verify_email: # Get a validated session matching these details validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 05cebb5d4d..a744d68c64 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -19,7 +19,6 @@ from twisted.web.client import PartialDownloadError from synapse.api.constants import LoginType from synapse.api.errors import Codes, LoginError, SynapseError -from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder if TYPE_CHECKING: @@ -153,7 +152,7 @@ class _BaseThreepidAuthChecker: logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) - # msisdns are currently always ThreepidBehaviour.REMOTE + # msisdns are currently always verified via the IS if medium == "msisdn": if not self.hs.config.registration.account_threepid_delegate_msisdn: raise SynapseError( @@ -164,18 +163,7 @@ class _BaseThreepidAuthChecker: threepid_creds, ) elif medium == "email": - if ( - self.hs.config.email.threepid_behaviour_email - == ThreepidBehaviour.REMOTE - ): - assert self.hs.config.registration.account_threepid_delegate_email - threepid = await identity_handler.threepid_from_creds( - self.hs.config.registration.account_threepid_delegate_email, - threepid_creds, - ) - elif ( - self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL - ): + if self.hs.config.email.can_verify_email: threepid = None row = await self.store.get_threepid_validation_session( medium, @@ -227,10 +215,7 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec _BaseThreepidAuthChecker.__init__(self, hs) def is_enabled(self) -> bool: - return self.hs.config.email.threepid_behaviour_email in ( - ThreepidBehaviour.REMOTE, - ThreepidBehaviour.LOCAL, - ) + return self.hs.config.email.can_verify_email async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 9041e29d6c..1f9a8ccc23 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -29,7 +29,6 @@ from synapse.api.errors import ( SynapseError, ThreepidValidationError, ) -from synapse.config.emailconfig import ThreepidBehaviour from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.http.server import HttpServer, finish_request, respond_with_html from synapse.http.servlet import ( @@ -68,7 +67,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): self.config = hs.config self.identity_handler = hs.get_identity_handler() - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -77,11 +76,10 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "User password resets have been disabled due to lack of email config" - ) + if not self.config.email.can_verify_email: + logger.warning( + "User password resets have been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based password resets have been disabled on this server" ) @@ -117,35 +115,20 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.registration.account_threepid_delegate_email - - # Have the configured identity server handle the request - ret = await self.identity_handler.request_email_token( - self.hs.config.registration.account_threepid_delegate_email, - body.email, - body.client_secret, - body.send_attempt, - body.next_link, - ) - else: - # Send password reset emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - body.email, - body.client_secret, - body.send_attempt, - 
self.mailer.send_password_reset_mail, - body.next_link, - ) - - # Wrap the session id in a JSON object - ret = {"sid": sid} - + # Send password reset emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + body.email, + body.client_secret, + body.send_attempt, + self.mailer.send_password_reset_mail, + body.next_link, + ) threepid_send_requests.labels(type="email", reason="password_reset").observe( body.send_attempt ) - return 200, ret + # Wrap the session id in a JSON object + return 200, {"sid": sid} class PasswordRestServlet(RestServlet): @@ -340,7 +323,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.store = self.hs.get_datastores().main - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -349,11 +332,10 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) + if not self.config.email.can_verify_email: + logger.warning( + "Adding emails have been disabled due to lack of an email config" + ) raise SynapseError( 400, "Adding an email to your account is disabled on this server", @@ -391,35 +373,21 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.registration.account_threepid_delegate_email - - # Have the configured identity server handle the request - ret = await self.identity_handler.request_email_token( - self.hs.config.registration.account_threepid_delegate_email, - body.email, - body.client_secret, - body.send_attempt, - body.next_link, - ) - else: - # Send threepid validation emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - body.email, - body.client_secret, - body.send_attempt, - self.mailer.send_add_threepid_mail, - body.next_link, - ) - - # Wrap the session id in a JSON object - ret = {"sid": sid} + # Send threepid validation emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + body.email, + body.client_secret, + body.send_attempt, + self.mailer.send_add_threepid_mail, + body.next_link, + ) threepid_send_requests.labels(type="email", reason="add_threepid").observe( body.send_attempt ) - return 200, ret + # Wrap the session id in a JSON object + return 200, {"sid": sid} class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -512,24 +480,18 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self._failure_email_template = ( self.config.email.email_add_threepid_template_failure_html ) async def on_GET(self, request: Request) -> None: - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) - 
raise SynapseError( - 400, "Adding an email to your account is disabled on this server" + if not self.config.email.can_verify_email: + logger.warning( + "Adding emails have been disabled due to lack of an email config" ) - elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: raise SynapseError( - 400, - "This homeserver is not validating threepids.", + 400, "Adding an email to your account is disabled on this server" ) sid = parse_string(request, "sid", required=True) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 1b953d3fa0..20bab20c8f 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -31,7 +31,6 @@ from synapse.api.errors import ( ) from synapse.api.ratelimiting import Ratelimiter from synapse.config import ConfigError -from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.ratelimiting import FederationRatelimitSettings from synapse.config.server import is_threepid_reserved @@ -74,7 +73,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.config = hs.config - if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.hs.config.email.can_verify_email: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -83,13 +82,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if ( - self.hs.config.email.local_threepid_handling_disabled_due_to_email_config - ): - logger.warning( - "Email registration has been disabled due to lack of email config" - ) + if not self.hs.config.email.can_verify_email: + logger.warning( + "Email registration has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration has been disabled on this server" ) @@ -138,35 +134,21 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.registration.account_threepid_delegate_email - - # Have the configured identity server handle the request - ret = await self.identity_handler.request_email_token( - self.hs.config.registration.account_threepid_delegate_email, - email, - client_secret, - send_attempt, - next_link, - ) - else: - # Send registration emails from Synapse, - # wrapping the session id in a JSON object. 
- ret = { - "sid": await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_registration_mail, - next_link, - ) - } + # Send registration emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_registration_mail, + next_link, + ) threepid_send_requests.labels(type="email", reason="register").observe( send_attempt ) - return 200, ret + # Wrap the session id in a JSON object + return 200, {"sid": sid} class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -260,7 +242,7 @@ class RegistrationSubmitTokenServlet(RestServlet): self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.email.can_verify_email: self._failure_email_template = ( self.config.email.email_registration_template_failure_html ) @@ -270,11 +252,10 @@ class RegistrationSubmitTokenServlet(RestServlet): raise SynapseError( 400, "This medium is currently not supported for registration" ) - if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.email.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "User registration via email has been disabled due to lack of email config" - ) + if not self.config.email.can_verify_email: + logger.warning( + "User registration via email has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration is disabled on this server" ) diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index 6ac9dbc7c9..b9402cfb75 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -17,7 +17,6 @@ from typing import TYPE_CHECKING, Tuple from twisted.web.server import Request from synapse.api.errors import ThreepidValidationError -from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import DirectServeHtmlResource from synapse.http.servlet import parse_string from synapse.util.stringutils import assert_valid_client_secret @@ -46,9 +45,6 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): self.clock = hs.get_clock() self.store = hs.get_datastores().main - self._local_threepid_handling_disabled_due_to_email_config = ( - hs.config.email.local_threepid_handling_disabled_due_to_email_config - ) self._confirmation_email_template = ( hs.config.email.email_password_reset_template_confirmation_html ) @@ -59,8 +55,8 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): hs.config.email.email_password_reset_template_failure_html ) - # This resource should not be mounted if threepid behaviour is not LOCAL - assert hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL + # This resource should only be mounted if email validation is enabled + assert hs.config.email.can_verify_email async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: sid = parse_string(request, "sid", required=True) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index ab4277dd31..b781875d52 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -586,9 +586,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "require_at_registration": True, }, "account_threepid_delegates": { - "email": "https://id_server", "msisdn": 
"https://id_server", }, + "email": {"notif_from": "Synapse "}, } ) def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: -- cgit 1.4.1 From ea85a2bf6cadea262bfd020dfdbffe67d026d6a8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 12:40:35 +0100 Subject: Remove manually-added changelog --- changelog.d/13596.removal | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/13596.removal diff --git a/changelog.d/13596.removal b/changelog.d/13596.removal deleted file mode 100644 index 6c12ae75b4..0000000000 --- a/changelog.d/13596.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the ability for homeservers to delegate email ownership verification and password reset confirmation to identity servers. See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.66/docs/upgrade.md#upgrading-to-v1660) for more details. \ No newline at end of file -- cgit 1.4.1 From aec87a0f9369a3015b2a53469f88d1de274e8b71 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Aug 2022 13:15:43 +0100 Subject: Speed up fetching large numbers of push rules (#13592) --- changelog.d/13592.misc | 1 + synapse/replication/slave/storage/push_rule.py | 1 - synapse/storage/databases/main/account_data.py | 3 --- synapse/storage/databases/main/push_rule.py | 6 +----- 4 files changed, 2 insertions(+), 9 deletions(-) create mode 100644 changelog.d/13592.misc diff --git a/changelog.d/13592.misc b/changelog.d/13592.misc new file mode 100644 index 0000000000..8f48d557e5 --- /dev/null +++ b/changelog.d/13592.misc @@ -0,0 +1 @@ +Minor speed up of fetching large numbers of push rules. diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index 52ee3f7e58..5e65eaf1e0 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -31,6 +31,5 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): self._push_rules_stream_id_gen.advance(instance_name, token) for row in rows: self.get_push_rules_for_user.invalidate((row.user_id,)) - self.get_push_rules_enabled_for_user.invalidate((row.user_id,)) self.push_rules_stream_cache.entity_has_changed(row.user_id, token) return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 9af9f4f18e..c38b8a9e5a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -650,9 +650,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) txn, self.get_account_data_for_room, (user_id,) ) self._invalidate_cache_and_stream(txn, self.get_push_rules_for_user, (user_id,)) - self._invalidate_cache_and_stream( - txn, self.get_push_rules_enabled_for_user, (user_id,) - ) # This user might be contained in the ignored_by cache for other users, # so we have to invalidate it all. 
self._invalidate_all_cache_and_stream(txn, self.ignored_by) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 255620f996..5079edd1e0 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -165,7 +165,6 @@ class PushRulesWorkerStore( return _load_rules(rows, enabled_map, self.hs.config.experimental) - @cached(max_entries=5000) async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]: results = await self.db_pool.simple_select_list( table="push_rules_enable", @@ -229,9 +228,6 @@ class PushRulesWorkerStore( return results - @cachedList( - cached_method_name="get_push_rules_enabled_for_user", list_name="user_ids" - ) async def bulk_get_push_rules_enabled( self, user_ids: Collection[str] ) -> Dict[str, Dict[str, bool]]: @@ -246,6 +242,7 @@ class PushRulesWorkerStore( iterable=user_ids, retcols=("user_name", "rule_id", "enabled"), desc="bulk_get_push_rules_enabled", + batch_size=1000, ) for row in rows: enabled = bool(row["enabled"]) @@ -792,7 +789,6 @@ class PushRuleStore(PushRulesWorkerStore): self.db_pool.simple_insert_txn(txn, "push_rules_stream", values=values) txn.call_after(self.get_push_rules_for_user.invalidate, (user_id,)) - txn.call_after(self.get_push_rules_enabled_for_user.invalidate, (user_id,)) txn.call_after( self.push_rules_stream_cache.entity_has_changed, user_id, stream_id ) -- cgit 1.4.1 From bdfff9c36e8ac625a509fc424695e6b9acf84103 Mon Sep 17 00:00:00 2001 From: nilsKr3 <75118031+nilsKr3@users.noreply.github.com> Date: Tue, 23 Aug 2022 15:34:10 +0200 Subject: Update openid.md (#13568) Linking the help article may prevent confusion regarding the creation of the necessary rule using auth0. --- docs/openid.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/openid.md b/docs/openid.md index d0ccf36f71..ce9b026228 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -174,7 +174,9 @@ oidc_providers: 1. Create a regular web application for Synapse 2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback` -3. Add a rule to add the `preferred_username` claim. +3. Add a rule with any name to add the `preferred_username` claim. +(See https://auth0.com/docs/customize/rules/create-rules for more information on how to create rules.) +
Code sample -- cgit 1.4.1 From 05c9c7363b09d2517a79915b831ce423c7defc7e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Aug 2022 15:14:05 +0100 Subject: Fix regression caused by #13573 (#13600) Broke in #13573. --- changelog.d/13600.misc | 1 + synapse/storage/databases/main/roommember.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13600.misc diff --git a/changelog.d/13600.misc b/changelog.d/13600.misc new file mode 100644 index 0000000000..1ce9c0c081 --- /dev/null +++ b/changelog.d/13600.misc @@ -0,0 +1 @@ +Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 0eb024a809..046ad3a11c 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -916,7 +916,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): event_to_memberships = await self._get_user_ids_from_membership_event_ids( missing_member_event_ids ) - users_in_room.update(event_to_memberships.values()) + users_in_room.update( + user_id for user_id in event_to_memberships.values() if user_id + ) if event is not None and event.type == EventTypes.Member: if event.membership == Membership.JOIN: @@ -942,15 +944,15 @@ class RoomMemberWorkerStore(EventsWorkerStore): ) async def _get_user_ids_from_membership_event_ids( self, event_ids: Iterable[str] - ) -> Dict[str, str]: + ) -> Dict[str, Optional[str]]: """For given set of member event_ids check if they point to a join - event and if so return the associated user and profile info. + event. Args: event_ids: The member event IDs to lookup Returns: - Map from event ID to `user_id` and ProfileInfo (or None if not join event). + Map from event ID to `user_id`, or None if event is not a join. """ rows = await self.db_pool.simple_select_many_batch( -- cgit 1.4.1 From f7ddfe17a30a50205a23bf5ca4d7d71e691e1e48 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Aug 2022 15:53:27 +0100 Subject: Speed up `@cachedList` (#13591) This speeds things up by ~2x. The vast majority of the time is now spent in `LruCache` moving things around the linked lists. We do this via two things: 1. Don't create a deferred per-key during bulk set operations in `DeferredCache`. Instead, only create them if a subsequent caller asks for the key. 2. Add a bulk lookup API to `DeferredCache` rather than use a loop. --- changelog.d/13591.misc | 1 + synapse/util/caches/deferred_cache.py | 346 +++++++++++++++++++++++++--------- synapse/util/caches/descriptors.py | 89 ++++----- synapse/util/caches/treecache.py | 3 + 4 files changed, 298 insertions(+), 141 deletions(-) create mode 100644 changelog.d/13591.misc diff --git a/changelog.d/13591.misc b/changelog.d/13591.misc new file mode 100644 index 0000000000..080e865e55 --- /dev/null +++ b/changelog.d/13591.misc @@ -0,0 +1 @@ +Improve performance of `@cachedList`. diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 1d6ec22191..6425f851ea 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -14,15 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import abc import enum import threading from typing import ( Callable, + Collection, + Dict, Generic, - Iterable, MutableMapping, Optional, + Set, Sized, + Tuple, TypeVar, Union, cast, @@ -31,7 +35,6 @@ from typing import ( from prometheus_client import Gauge from twisted.internet import defer -from twisted.python import failure from twisted.python.failure import Failure from synapse.util.async_helpers import ObservableDeferred @@ -94,7 +97,7 @@ class DeferredCache(Generic[KT, VT]): # _pending_deferred_cache maps from the key value to a `CacheEntry` object. self._pending_deferred_cache: Union[ - TreeCache, "MutableMapping[KT, CacheEntry]" + TreeCache, "MutableMapping[KT, CacheEntry[KT, VT]]" ] = cache_type() def metrics_cb() -> None: @@ -159,15 +162,16 @@ class DeferredCache(Generic[KT, VT]): Raises: KeyError if the key is not found in the cache """ - callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) if val is not _Sentinel.sentinel: - val.callbacks.update(callbacks) + val.add_invalidation_callback(key, callback) if update_metrics: m = self.cache.metrics assert m # we always have a name, so should always have metrics m.inc_hits() - return val.deferred.observe() + return val.deferred(key) + + callbacks = (callback,) if callback else () val2 = self.cache.get( key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics @@ -177,6 +181,73 @@ class DeferredCache(Generic[KT, VT]): else: return defer.succeed(val2) + def get_bulk( + self, + keys: Collection[KT], + callback: Optional[Callable[[], None]] = None, + ) -> Tuple[Dict[KT, VT], Optional["defer.Deferred[Dict[KT, VT]]"], Collection[KT]]: + """Bulk lookup of items in the cache. + + Returns: + A 3-tuple of: + 1. a dict of key/value of items already cached; + 2. a deferred that resolves to a dict of key/value of items + we're already fetching; and + 3. a collection of keys that don't appear in the previous two. + """ + + # The cached results + cached = {} + + # List of pending deferreds + pending = [] + + # Dict that gets filled out when the pending deferreds complete + pending_results = {} + + # List of keys that aren't in either cache + missing = [] + + callbacks = (callback,) if callback else () + + for key in keys: + # Check if its in the main cache. + immediate_value = self.cache.get( + key, + _Sentinel.sentinel, + callbacks=callbacks, + ) + if immediate_value is not _Sentinel.sentinel: + cached[key] = immediate_value + continue + + # Check if its in the pending cache + pending_value = self._pending_deferred_cache.get(key, _Sentinel.sentinel) + if pending_value is not _Sentinel.sentinel: + pending_value.add_invalidation_callback(key, callback) + + def completed_cb(value: VT, key: KT) -> VT: + pending_results[key] = value + return value + + # Add a callback to fill out `pending_results` when that completes + d = pending_value.deferred(key).addCallback(completed_cb, key) + pending.append(d) + continue + + # Not in either cache + missing.append(key) + + # If we've got pending deferreds, squash them into a single one that + # returns `pending_results`. 
+ pending_deferred = None + if pending: + pending_deferred = defer.gatherResults( + pending, consumeErrors=True + ).addCallback(lambda _: pending_results) + + return (cached, pending_deferred, missing) + def get_immediate( self, key: KT, default: T, update_metrics: bool = True ) -> Union[VT, T]: @@ -218,84 +289,89 @@ class DeferredCache(Generic[KT, VT]): value: a deferred which will complete with a result to add to the cache callback: An optional callback to be called when the entry is invalidated """ - if not isinstance(value, defer.Deferred): - raise TypeError("not a Deferred") - - callbacks = [callback] if callback else [] self.check_thread() - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry: - existing_entry.invalidate() + self._pending_deferred_cache.pop(key, None) # XXX: why don't we invalidate the entry in `self.cache` yet? - # we can save a whole load of effort if the deferred is ready. - if value.called: - result = value.result - if not isinstance(result, failure.Failure): - self.cache.set(key, cast(VT, result), callbacks) - return value - # otherwise, we'll add an entry to the _pending_deferred_cache for now, # and add callbacks to add it to the cache properly later. + entry = CacheEntrySingle[KT, VT](value) + entry.add_invalidation_callback(key, callback) + self._pending_deferred_cache[key] = entry + deferred = entry.deferred(key).addCallbacks( + self._completed_callback, + self._error_callback, + callbackArgs=(entry, key), + errbackArgs=(entry, key), + ) - observable = ObservableDeferred(value, consumeErrors=True) - observer = observable.observe() - entry = CacheEntry(deferred=observable, callbacks=callbacks) + # we return a new Deferred which will be called before any subsequent observers. + return deferred - self._pending_deferred_cache[key] = entry + def start_bulk_input( + self, + keys: Collection[KT], + callback: Optional[Callable[[], None]] = None, + ) -> "CacheMultipleEntries[KT, VT]": + """Bulk set API for use when fetching multiple keys at once from the DB. - def compare_and_pop() -> bool: - """Check if our entry is still the one in _pending_deferred_cache, and - if so, pop it. - - Returns true if the entries matched. - """ - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry is entry: - return True - - # oops, the _pending_deferred_cache has been updated since - # we started our query, so we are out of date. - # - # Better put back whatever we took out. (We do it this way - # round, rather than peeking into the _pending_deferred_cache - # and then removing on a match, to make the common case faster) - if existing_entry is not None: - self._pending_deferred_cache[key] = existing_entry - - return False - - def cb(result: VT) -> None: - if compare_and_pop(): - self.cache.set(key, result, entry.callbacks) - else: - # we're not going to put this entry into the cache, so need - # to make sure that the invalidation callbacks are called. - # That was probably done when _pending_deferred_cache was - # updated, but it's possible that `set` was called without - # `invalidate` being previously called, in which case it may - # not have been. Either way, let's double-check now. - entry.invalidate() - - def eb(_fail: Failure) -> None: - compare_and_pop() - entry.invalidate() - - # once the deferred completes, we can move the entry from the - # _pending_deferred_cache to the real cache. 
- # - observer.addCallbacks(cb, eb) + Called *before* starting the fetch from the DB, and the caller *must* + call either `complete_bulk(..)` or `error_bulk(..)` on the return value. + """ - # we return a new Deferred which will be called before any subsequent observers. - return observable.observe() + entry = CacheMultipleEntries[KT, VT]() + entry.add_global_invalidation_callback(callback) + + for key in keys: + self._pending_deferred_cache[key] = entry + + return entry + + def _completed_callback( + self, value: VT, entry: "CacheEntry[KT, VT]", key: KT + ) -> VT: + """Called when a deferred is completed.""" + # We check if the current entry matches the entry associated with the + # deferred. If they don't match then it got invalidated. + current_entry = self._pending_deferred_cache.pop(key, None) + if current_entry is not entry: + if current_entry: + self._pending_deferred_cache[key] = current_entry + return value + + self.cache.set(key, value, entry.get_invalidation_callbacks(key)) + + return value + + def _error_callback( + self, + failure: Failure, + entry: "CacheEntry[KT, VT]", + key: KT, + ) -> Failure: + """Called when a deferred errors.""" + + # We check if the current entry matches the entry associated with the + # deferred. If they don't match then it got invalidated. + current_entry = self._pending_deferred_cache.pop(key, None) + if current_entry is not entry: + if current_entry: + self._pending_deferred_cache[key] = current_entry + return failure + + for cb in entry.get_invalidation_callbacks(key): + cb() + + return failure def prefill( self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None ) -> None: - callbacks = [callback] if callback else [] + callbacks = (callback,) if callback else () self.cache.set(key, value, callbacks=callbacks) + self._pending_deferred_cache.pop(key, None) def invalidate(self, key: KT) -> None: """Delete a key, or tree of entries @@ -311,41 +387,129 @@ class DeferredCache(Generic[KT, VT]): self.cache.del_multi(key) # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, which will (a) stop it being returned - # for future queries and (b) stop it being persisted as a proper entry + # _pending_deferred_cache, which will (a) stop it being returned for + # future queries and (b) stop it being persisted as a proper entry # in self.cache. entry = self._pending_deferred_cache.pop(key, None) - - # run the invalidation callbacks now, rather than waiting for the - # deferred to resolve. if entry: # _pending_deferred_cache.pop should either return a CacheEntry, or, in the # case of a TreeCache, a dict of keys to cache entries. Either way calling # iterate_tree_cache_entry on it will do the right thing. 
for entry in iterate_tree_cache_entry(entry): - entry.invalidate() + for cb in entry.get_invalidation_callbacks(key): + cb() def invalidate_all(self) -> None: self.check_thread() self.cache.clear() - for entry in self._pending_deferred_cache.values(): - entry.invalidate() + for key, entry in self._pending_deferred_cache.items(): + for cb in entry.get_invalidation_callbacks(key): + cb() + self._pending_deferred_cache.clear() -class CacheEntry: - __slots__ = ["deferred", "callbacks", "invalidated"] +class CacheEntry(Generic[KT, VT], metaclass=abc.ABCMeta): + """Abstract class for entries in `DeferredCache[KT, VT]`""" - def __init__( - self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]] - ): - self.deferred = deferred - self.callbacks = set(callbacks) - self.invalidated = False - - def invalidate(self) -> None: - if not self.invalidated: - self.invalidated = True - for callback in self.callbacks: - callback() - self.callbacks.clear() + @abc.abstractmethod + def deferred(self, key: KT) -> "defer.Deferred[VT]": + """Get a deferred that a caller can wait on to get the value at the + given key""" + ... + + @abc.abstractmethod + def add_invalidation_callback( + self, key: KT, callback: Optional[Callable[[], None]] + ) -> None: + """Add an invalidation callback""" + ... + + @abc.abstractmethod + def get_invalidation_callbacks(self, key: KT) -> Collection[Callable[[], None]]: + """Get all invalidation callbacks""" + ... + + +class CacheEntrySingle(CacheEntry[KT, VT]): + """An implementation of `CacheEntry` wrapping a deferred that results in a + single cache entry. + """ + + __slots__ = ["_deferred", "_callbacks"] + + def __init__(self, deferred: "defer.Deferred[VT]") -> None: + self._deferred = ObservableDeferred(deferred, consumeErrors=True) + self._callbacks: Set[Callable[[], None]] = set() + + def deferred(self, key: KT) -> "defer.Deferred[VT]": + return self._deferred.observe() + + def add_invalidation_callback( + self, key: KT, callback: Optional[Callable[[], None]] + ) -> None: + if callback is None: + return + + self._callbacks.add(callback) + + def get_invalidation_callbacks(self, key: KT) -> Collection[Callable[[], None]]: + return self._callbacks + + +class CacheMultipleEntries(CacheEntry[KT, VT]): + """Cache entry that is used for bulk lookups and insertions.""" + + __slots__ = ["_deferred", "_callbacks", "_global_callbacks"] + + def __init__(self) -> None: + self._deferred: Optional[ObservableDeferred[Dict[KT, VT]]] = None + self._callbacks: Dict[KT, Set[Callable[[], None]]] = {} + self._global_callbacks: Set[Callable[[], None]] = set() + + def deferred(self, key: KT) -> "defer.Deferred[VT]": + if not self._deferred: + self._deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True) + return self._deferred.observe().addCallback(lambda res: res.get(key)) + + def add_invalidation_callback( + self, key: KT, callback: Optional[Callable[[], None]] + ) -> None: + if callback is None: + return + + self._callbacks.setdefault(key, set()).add(callback) + + def get_invalidation_callbacks(self, key: KT) -> Collection[Callable[[], None]]: + return self._callbacks.get(key, set()) | self._global_callbacks + + def add_global_invalidation_callback( + self, callback: Optional[Callable[[], None]] + ) -> None: + """Add a callback for when any keys get invalidated.""" + if callback is None: + return + + self._global_callbacks.add(callback) + + def complete_bulk( + self, + cache: DeferredCache[KT, VT], + result: Dict[KT, VT], + ) -> None: + """Called when there is a result""" 
+ for key, value in result.items(): + cache._completed_callback(value, self, key) + + if self._deferred: + self._deferred.callback(result) + + def error_bulk( + self, cache: DeferredCache[KT, VT], keys: Collection[KT], failure: Failure + ) -> None: + """Called when bulk lookup failed.""" + for key in keys: + cache._error_callback(failure, self, key) + + if self._deferred: + self._deferred.errback(failure) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 9d4bc89edb..10aff4d04a 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -25,6 +25,7 @@ from typing import ( Generic, Hashable, Iterable, + List, Mapping, Optional, Sequence, @@ -440,16 +441,6 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names] list_args = arg_dict[self.list_name] - results = {} - - def update_results_dict(res: Any, arg: Hashable) -> None: - results[arg] = res - - # list of deferreds to wait for - cached_defers = [] - - missing = set() - # If the cache takes a single arg then that is used as the key, # otherwise a tuple is used. if num_args == 1: @@ -457,6 +448,9 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): def arg_to_cache_key(arg: Hashable) -> Hashable: return arg + def cache_key_to_arg(key: tuple) -> Hashable: + return key + else: keylist = list(keyargs) @@ -464,58 +458,53 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): keylist[self.list_pos] = arg return tuple(keylist) - for arg in list_args: - try: - res = cache.get(arg_to_cache_key(arg), callback=invalidate_callback) - if not res.called: - res.addCallback(update_results_dict, arg) - cached_defers.append(res) - else: - results[arg] = res.result - except KeyError: - missing.add(arg) + def cache_key_to_arg(key: tuple) -> Hashable: + return key[self.list_pos] + + cache_keys = [arg_to_cache_key(arg) for arg in list_args] + immediate_results, pending_deferred, missing = cache.get_bulk( + cache_keys, callback=invalidate_callback + ) + + results = {cache_key_to_arg(key): v for key, v in immediate_results.items()} + + cached_defers: List["defer.Deferred[Any]"] = [] + if pending_deferred: + + def update_results(r: Dict) -> None: + for k, v in r.items(): + results[cache_key_to_arg(k)] = v + + pending_deferred.addCallback(update_results) + cached_defers.append(pending_deferred) if missing: - # we need a deferred for each entry in the list, - # which we put in the cache. Each deferred resolves with the - # relevant result for that key. - deferreds_map = {} - for arg in missing: - deferred: "defer.Deferred[Any]" = defer.Deferred() - deferreds_map[arg] = deferred - key = arg_to_cache_key(arg) - cached_defers.append( - cache.set(key, deferred, callback=invalidate_callback) - ) + cache_entry = cache.start_bulk_input(missing, invalidate_callback) def complete_all(res: Dict[Hashable, Any]) -> None: - # the wrapped function has completed. It returns a dict. - # We can now update our own result map, and then resolve the - # observable deferreds in the cache. - for e, d1 in deferreds_map.items(): - val = res.get(e, None) - # make sure we update the results map before running the - # deferreds, because as soon as we run the last deferred, the - # gatherResults() below will complete and return the result - # dict to our caller. 
- results[e] = val - d1.callback(val) + missing_results = {} + for key in missing: + arg = cache_key_to_arg(key) + val = res.get(arg, None) + + results[arg] = val + missing_results[key] = val + + cache_entry.complete_bulk(cache, missing_results) def errback_all(f: Failure) -> None: - # the wrapped function has failed. Propagate the failure into - # the cache, which will invalidate the entry, and cause the - # relevant cached_deferreds to fail, which will propagate the - # failure to our caller. - for d1 in deferreds_map.values(): - d1.errback(f) + cache_entry.error_bulk(cache, missing, f) args_to_call = dict(arg_dict) - args_to_call[self.list_name] = missing + args_to_call[self.list_name] = { + cache_key_to_arg(key) for key in missing + } # dispatch the call, and attach the two handlers - defer.maybeDeferred( + missing_d = defer.maybeDeferred( preserve_fn(self.orig), **args_to_call ).addCallbacks(complete_all, errback_all) + cached_defers.append(missing_d) if cached_defers: d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index c1b8ec0c73..fec31da2b6 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -135,6 +135,9 @@ class TreeCache: def values(self): return iterate_tree_cache_entry(self.root) + def items(self): + return iterate_tree_cache_items((), self.root) + def __len__(self) -> int: return self.size -- cgit 1.4.1 From a25a37002c851ef419d12925a11dd8bf2233470e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 23 Aug 2022 18:41:55 +0100 Subject: Write about the chain cover a little. (#13602) Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/13602.doc | 1 + docs/auth_chain_difference_algorithm.md | 51 +++++++++++++++++++++++++++------ 2 files changed, 43 insertions(+), 9 deletions(-) create mode 100644 changelog.d/13602.doc diff --git a/changelog.d/13602.doc b/changelog.d/13602.doc new file mode 100644 index 0000000000..dbba082163 --- /dev/null +++ b/changelog.d/13602.doc @@ -0,0 +1 @@ +Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. diff --git a/docs/auth_chain_difference_algorithm.md b/docs/auth_chain_difference_algorithm.md index 30f72a70da..ebc9de25b8 100644 --- a/docs/auth_chain_difference_algorithm.md +++ b/docs/auth_chain_difference_algorithm.md @@ -34,13 +34,45 @@ the process of indexing it). ## Chain Cover Index Synapse computes auth chain differences by pre-computing a "chain cover" index -for the auth chain in a room, allowing efficient reachability queries like "is -event A in the auth chain of event B". This is done by assigning every event a -*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links* -between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A` -is in the auth chain of `B`) if and only if either: - -1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s +for the auth chain in a room, allowing us to efficiently make reachability queries +like "is event `A` in the auth chain of event `B`?". We could do this with an index +that tracks all pairs `(A, B)` such that `A` is in the auth chain of `B`. However, this +would be prohibitively large, scaling poorly as the room accumulates more state +events. + +Instead, we break down the graph into *chains*. 
A chain is a subset of a DAG +with the following property: for any pair of events `E` and `F` in the chain, +the chain contains a path `E -> F` or a path `F -> E`. This forces a chain to be +linear (without forks), e.g. `E -> F -> G -> ... -> H`. Each event in the chain +is given a *sequence number* local to that chain. The oldest event `E` in the +chain has sequence number 1. If `E` has a child `F` in the chain, then `F` has +sequence number 2. If `E` has a grandchild `G` in the chain, then `G` has +sequence number 3; and so on. + +Synapse ensures that each persisted event belongs to exactly one chain, and +tracks how the chains are connected to one another. This allows us to +efficiently answer reachability queries. Doing so uses less storage than +tracking reachability on an event-by-event basis, particularly when we have +fewer and longer chains. See + +> Jagadish, H. (1990). [A compression technique to materialize transitive closure](https://doi.org/10.1145/99935.99944). +> *ACM Transactions on Database Systems (TODS)*, 15*(4)*, 558-598. + +for the original idea or + +> Y. Chen, Y. Chen, [An efficient algorithm for answering graph +> reachability queries](https://doi.org/10.1109/ICDE.2008.4497498), +> in: 2008 IEEE 24th International Conference on Data Engineering, April 2008, +> pp. 893–902. (PDF available via [Google Scholar](https://scholar.google.com/scholar?q=Y.%20Chen,%20Y.%20Chen,%20An%20efficient%20algorithm%20for%20answering%20graph%20reachability%20queries,%20in:%202008%20IEEE%2024th%20International%20Conference%20on%20Data%20Engineering,%20April%202008,%20pp.%20893902.).) + +for a more modern take. + +In practical terms, the chain cover assigns every event a +*chain ID* and *sequence number* (e.g. `(5,3)`), and maintains a map of *links* +between events in chains (e.g. `(5,3) -> (2,4)`) such that `A` is reachable by `B` +(i.e. `A` is in the auth chain of `B`) if and only if either: + +1. `A` and `B` have the same chain ID and `A`'s sequence number is less than `B`'s sequence number; or 2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that `L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`. @@ -49,8 +81,9 @@ There are actually two potential implementations, one where we store links from each chain to every other reachable chain (the transitive closure of the links graph), and one where we remove redundant links (the transitive reduction of the links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1` -would not be stored. Synapse uses the former implementations so that it doesn't -need to recurse to test reachability between chains. +would not be stored. Synapse uses the former implementation so that it doesn't +need to recurse to test reachability between chains. This trades off extra storage +in order to save CPU cycles and DB queries.
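To make the two rules concrete, here is a minimal illustrative sketch of the reachability test (an editorial example under the assumption that the transitive closure of links is stored as a dict keyed by `(chain_of_B, chain_of_A)` pairs; it is not Synapse's actual implementation):

```python
def is_in_auth_chain(a, b, links):
    """Return True if event `a` is in the auth chain of event `b`.

    `a` and `b` are (chain_id, sequence_number) pairs; `links` maps
    (chain_of_b, chain_of_a) -> (start_seq_no, end_seq_no).
    """
    chain_a, seq_a = a
    chain_b, seq_b = b

    # Rule 1: same chain, and `a` comes earlier in it.
    if chain_a == chain_b:
        return seq_a < seq_b

    # Rule 2: a stored link from `b`'s chain to `a`'s chain covers both events.
    link = links.get((chain_b, chain_a))
    if link is None:
        return False
    start_seq_no, end_seq_no = link
    return start_seq_no <= seq_b and seq_a <= end_seq_no


# The link `(5,3) -> (2,4)` from the example above:
links = {(5, 2): (3, 4)}
assert is_in_auth_chain((2, 4), (5, 3), links)      # (2,4) is in (5,3)'s auth chain
assert not is_in_auth_chain((2, 4), (5, 2), links)  # link starts after (5,2)
```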
### Example -- cgit 1.4.1 From 7af07f9716e6b4a2238ecd435e19bf8501360bc7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 23 Aug 2022 21:53:37 -0500 Subject: Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls (#13588) Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls because I've seen `_check_sigs_and_hash_and_fetch` take [10.41s to process 100 events](https://github.com/matrix-org/synapse/issues/13587) Fix https://github.com/matrix-org/synapse/issues/13587 Part of https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/13588.misc | 1 + synapse/crypto/event_signing.py | 2 ++ synapse/events/spamcheck.py | 2 ++ synapse/federation/federation_base.py | 22 ++++++++++++++++++++++ synapse/federation/federation_client.py | 23 ++++++++++++++++++++--- 5 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13588.misc diff --git a/changelog.d/13588.misc b/changelog.d/13588.misc new file mode 100644 index 0000000000..eca1416ceb --- /dev/null +++ b/changelog.d/13588.misc @@ -0,0 +1 @@ +Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 7520647d1e..23b799ac32 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -28,6 +28,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.events.utils import prune_event, prune_event_dict +from synapse.logging.opentracing import trace from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -35,6 +36,7 @@ logger = logging.getLogger(__name__) Hasher = Callable[[bytes], "hashlib._Hash"] +@trace def check_event_content_hash( event: EventBase, hash_algorithm: Hasher = hashlib.sha256 ) -> bool: diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 4a3bfb38f1..623a2c71ea 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -32,6 +32,7 @@ from typing_extensions import Literal import synapse from synapse.api.errors import Codes +from synapse.logging.opentracing import trace from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour @@ -378,6 +379,7 @@ class SpamChecker: if check_media_file_for_spam is not None: self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam) + @trace async def check_event_for_spam( self, event: "synapse.events.EventBase" ) -> Union[Tuple[Codes, JsonDict], str]: diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 2522bf78fc..4269a98db2 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -23,6 +23,7 @@ from synapse.crypto.keyring import Keyring from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event, validate_canonicaljson from synapse.http.servlet import assert_params_in_dict +from synapse.logging.opentracing import log_kv, trace from synapse.types import JsonDict, get_domain_from_id if TYPE_CHECKING: @@ -55,6 +56,7 @@ class FederationBase: self._clock = hs.get_clock() self._storage_controllers = hs.get_storage_controllers() + @trace async def _check_sigs_and_hash( self, room_version: RoomVersion, pdu:
EventBase ) -> EventBase: @@ -97,17 +99,36 @@ class FederationBase: "Event %s seems to have been redacted; using our redacted copy", pdu.event_id, ) + log_kv( + { + "message": "Event seems to have been redacted; using our redacted copy", + "event_id": pdu.event_id, + } + ) else: logger.warning( "Event %s content has been tampered, redacting", pdu.event_id, ) + log_kv( + { + "message": "Event content has been tampered, redacting", + "event_id": pdu.event_id, + } + ) return redacted_event spam_check = await self.spam_checker.check_event_for_spam(pdu) if spam_check != self.spam_checker.NOT_SPAM: logger.warning("Event contains spam, soft-failing %s", pdu.event_id) + log_kv( + { + "message": "Event contains spam, redacting (to save disk space) " + "as well as soft-failing (to stop using the event in prev_events)", + "event_id": pdu.event_id, + } + ) # we redact (to save disk space) as well as soft-failing (to stop # using the event in prev_events). redacted_event = prune_event(pdu) @@ -117,6 +138,7 @@ class FederationBase: return pdu +@trace async def _check_sigs_on_pdu( keyring: Keyring, room_version: RoomVersion, pdu: EventBase ) -> None: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 987f6dad46..7ee2974bb1 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -61,7 +61,7 @@ from synapse.federation.federation_base import ( ) from synapse.federation.transport.client import SendJoinResponse from synapse.http.types import QueryParams -from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace +from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -587,11 +587,15 @@ class FederationClient(FederationBase): Returns: A list of PDUs that have valid signatures and hashes. """ + set_tag( + SynapseTags.RESULT_PREFIX + "pdus.length", + str(len(pdus)), + ) # We limit how many PDUs we check at once, as if we try to do hundreds # of thousands of PDUs at once we see large memory spikes. - valid_pdus = [] + valid_pdus: List[EventBase] = [] async def _execute(pdu: EventBase) -> None: valid_pdu = await self._check_sigs_and_hash_and_fetch_one( @@ -607,6 +611,8 @@ class FederationClient(FederationBase): return valid_pdus + @trace + @tag_args async def _check_sigs_and_hash_and_fetch_one( self, pdu: EventBase, @@ -639,16 +645,27 @@ class FederationClient(FederationBase): except InvalidEventSignatureError as e: logger.warning( "Signature on retrieved event %s was invalid (%s). " - "Checking local store/orgin server", + "Checking local store/origin server", pdu.event_id, e, ) + log_kv( + { + "message": "Signature on retrieved event was invalid. " + "Checking local store/origin server", + "event_id": pdu.event_id, + "InvalidEventSignatureError": e, + } + ) # Check local db. res = await self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True ) + # If the PDU fails its signature check and we don't have it in our + # database, we then request it from sender's server (if that is not the + # same as `origin`). 
pdu_origin = get_domain_from_id(pdu.sender) if not res and pdu_origin != origin: try: -- cgit 1.4.1 From ba882c03579e96a97568fca3551207690b302b30 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 24 Aug 2022 09:09:59 +0000 Subject: Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. (#13583) Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/13583.bugfix | 1 + synapse/federation/federation_server.py | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/13583.bugfix diff --git a/changelog.d/13583.bugfix b/changelog.d/13583.bugfix new file mode 100644 index 0000000000..1e4ce5904b --- /dev/null +++ b/changelog.d/13583.bugfix @@ -0,0 +1 @@ +Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. \ No newline at end of file diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 75fbc6073d..3bf84cf625 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -763,6 +763,17 @@ class FederationServer(FederationBase): The partial knock event. """ origin_host, _ = parse_server_name(origin) + + if await self.store.is_partial_state_room(room_id): + # Before we do anything: check if the room is partial-stated. + # Note that at the time this check was added, `on_make_knock_request` would + # block due to https://github.com/matrix-org/synapse/issues/12997. + raise SynapseError( + 404, + "Unable to handle /make_knock right now; this server is not fully joined.", + errcode=Codes.NOT_FOUND, + ) + await self.check_server_matches_acl(origin_host, room_id) room_version = await self.store.get_room_version(room_id) -- cgit 1.4.1 From b687010f895ac54323a2fea96cd7720a6dfb99cd Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Wed, 24 Aug 2022 10:12:51 +0100 Subject: Rewrite get push actions queries (#13597) --- changelog.d/13597.misc | 1 + .../storage/databases/main/event_push_actions.py | 228 ++++++--------------- 2 files changed, 69 insertions(+), 160 deletions(-) create mode 100644 changelog.d/13597.misc diff --git a/changelog.d/13597.misc b/changelog.d/13597.misc new file mode 100644 index 0000000000..eb5e971008 --- /dev/null +++ b/changelog.d/13597.misc @@ -0,0 +1 @@ + Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index eabf9c9739..8dfa545c27 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -459,6 +459,32 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas return await self.db_pool.runInteraction("get_push_action_users_in_range", f) + def _get_receipts_by_room_txn( + self, txn: LoggingTransaction, user_id: str + ) -> List[Tuple[str, int]]: + receipt_types_clause, args = make_in_list_sql_clause( + self.database_engine, + "receipt_type", + ( + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.UNSTABLE_READ_PRIVATE, + ), + ) + + sql = f""" + SELECT room_id, MAX(stream_ordering) + FROM receipts_linearized + INNER JOIN events USING (room_id, event_id) + WHERE {receipt_types_clause} + AND user_id = ? 
+ GROUP BY room_id + """ + + args.extend((user_id,)) + txn.execute(sql, args) + return cast(List[Tuple[str, int]], txn.fetchall()) + async def get_unread_push_actions_for_user_in_range_for_http( self, user_id: str, @@ -482,106 +508,45 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas The list will have between 0~limit entries. """ - # find rooms that have a read receipt in them and return the next - # push actions - def get_after_receipt( - txn: LoggingTransaction, - ) -> List[Tuple[str, str, int, str, bool]]: - # find rooms that have a read receipt in them and return the next - # push actions - - receipt_types_clause, args = make_in_list_sql_clause( - self.database_engine, - "receipt_type", - ( - ReceiptTypes.READ, - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ), - ) - - sql = f""" - SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, - ep.highlight - FROM ( - SELECT room_id, - MAX(stream_ordering) as stream_ordering - FROM events - INNER JOIN receipts_linearized USING (room_id, event_id) - WHERE {receipt_types_clause} AND user_id = ? - GROUP BY room_id - ) AS rl, - event_push_actions AS ep - WHERE - ep.room_id = rl.room_id - AND ep.stream_ordering > rl.stream_ordering - AND ep.user_id = ? - AND ep.stream_ordering > ? - AND ep.stream_ordering <= ? - AND ep.notif = 1 - ORDER BY ep.stream_ordering ASC LIMIT ? - """ - args.extend( - (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) - ) - txn.execute(sql, args) - return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) - - after_read_receipt = await self.db_pool.runInteraction( - "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt + receipts_by_room = dict( + await self.db_pool.runInteraction( + "get_unread_push_actions_for_user_in_range_http_receipts", + self._get_receipts_by_room_txn, + user_id=user_id, + ), ) - # There are rooms with push actions in them but you don't have a read receipt in - # them e.g. rooms you've been invited to, so get push actions for rooms which do - # not have read receipts in them too. - def get_no_receipt( + def get_push_actions_txn( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool]]: - receipt_types_clause, args = make_in_list_sql_clause( - self.database_engine, - "receipt_type", - ( - ReceiptTypes.READ, - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ), - ) - - sql = f""" - SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, - ep.highlight + sql = """ + SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight FROM event_push_actions AS ep - INNER JOIN events AS e USING (room_id, event_id) WHERE - ep.room_id NOT IN ( - SELECT room_id FROM receipts_linearized - WHERE {receipt_types_clause} AND user_id = ? - GROUP BY room_id - ) - AND ep.user_id = ? + ep.user_id = ? AND ep.stream_ordering > ? AND ep.stream_ordering <= ? AND ep.notif = 1 ORDER BY ep.stream_ordering ASC LIMIT ? 
""" - args.extend( - (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) - ) - txn.execute(sql, args) + txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit)) return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) - no_read_receipt = await self.db_pool.runInteraction( - "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt + push_actions = await self.db_pool.runInteraction( + "get_unread_push_actions_for_user_in_range_http", get_push_actions_txn ) notifs = [ HttpPushAction( - event_id=row[0], - room_id=row[1], - stream_ordering=row[2], - actions=_deserialize_action(row[3], row[4]), + event_id=event_id, + room_id=room_id, + stream_ordering=stream_ordering, + actions=_deserialize_action(actions, highlight), ) - for row in after_read_receipt + no_read_receipt + for event_id, room_id, stream_ordering, actions, highlight in push_actions + # Only include push actions with a stream ordering after any receipt, or without any + # receipt present (invited to but never read rooms). + if stream_ordering > receipts_by_room.get(room_id, 0) ] # Now sort it so it's ordered correctly, since currently it will @@ -617,106 +582,49 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas The list will have between 0~limit entries. """ - # find rooms that have a read receipt in them and return the most recent - # push actions - def get_after_receipt( - txn: LoggingTransaction, - ) -> List[Tuple[str, str, int, str, bool, int]]: - receipt_types_clause, args = make_in_list_sql_clause( - self.database_engine, - "receipt_type", - ( - ReceiptTypes.READ, - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ), - ) - - sql = f""" - SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, - ep.highlight, e.received_ts - FROM ( - SELECT room_id, - MAX(stream_ordering) as stream_ordering - FROM events - INNER JOIN receipts_linearized USING (room_id, event_id) - WHERE {receipt_types_clause} AND user_id = ? - GROUP BY room_id - ) AS rl, - event_push_actions AS ep - INNER JOIN events AS e USING (room_id, event_id) - WHERE - ep.room_id = rl.room_id - AND ep.stream_ordering > rl.stream_ordering - AND ep.user_id = ? - AND ep.stream_ordering > ? - AND ep.stream_ordering <= ? - AND ep.notif = 1 - ORDER BY ep.stream_ordering DESC LIMIT ? - """ - args.extend( - (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) - ) - txn.execute(sql, args) - return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) - - after_read_receipt = await self.db_pool.runInteraction( - "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt + receipts_by_room = dict( + await self.db_pool.runInteraction( + "get_unread_push_actions_for_user_in_range_email_receipts", + self._get_receipts_by_room_txn, + user_id=user_id, + ), ) - # There are rooms with push actions in them but you don't have a read receipt in - # them e.g. rooms you've been invited to, so get push actions for rooms which do - # not have read receipts in them too. 
- def get_no_receipt( + def get_push_actions_txn( txn: LoggingTransaction, ) -> List[Tuple[str, str, int, str, bool, int]]: - receipt_types_clause, args = make_in_list_sql_clause( - self.database_engine, - "receipt_type", - ( - ReceiptTypes.READ, - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ), - ) - - sql = f""" + sql = """ SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight, e.received_ts FROM event_push_actions AS ep INNER JOIN events AS e USING (room_id, event_id) WHERE - ep.room_id NOT IN ( - SELECT room_id FROM receipts_linearized - WHERE {receipt_types_clause} AND user_id = ? - GROUP BY room_id - ) - AND ep.user_id = ? + ep.user_id = ? AND ep.stream_ordering > ? AND ep.stream_ordering <= ? AND ep.notif = 1 ORDER BY ep.stream_ordering DESC LIMIT ? """ - args.extend( - (user_id, user_id, min_stream_ordering, max_stream_ordering, limit) - ) - txn.execute(sql, args) + txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit)) return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) - no_read_receipt = await self.db_pool.runInteraction( - "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt + push_actions = await self.db_pool.runInteraction( + "get_unread_push_actions_for_user_in_range_email", get_push_actions_txn ) # Make a list of dicts from the two sets of results. notifs = [ EmailPushAction( - event_id=row[0], - room_id=row[1], - stream_ordering=row[2], - actions=_deserialize_action(row[3], row[4]), - received_ts=row[5], + event_id=event_id, + room_id=room_id, + stream_ordering=stream_ordering, + actions=_deserialize_action(actions, highlight), + received_ts=received_ts, ) - for row in after_read_receipt + no_read_receipt + for event_id, room_id, stream_ordering, actions, highlight, received_ts in push_actions + # Only include push actions with a stream ordering after any receipt, or without any + # receipt present (invited to but never read rooms). + if stream_ordering > receipts_by_room.get(room_id, 0) ] # Now sort it so it's ordered correctly, since currently it will -- cgit 1.4.1 From 2e2040c93e69c630d293b36354d53d2a40b823bd Mon Sep 17 00:00:00 2001 From: Kat Gerasimova Date: Wed, 24 Aug 2022 12:10:32 +0100 Subject: Add GitHub automation for new issues (#13610) Set up automation to move newly opened issues in GitHub to the issue triage board. --- .github/workflows/triage-incoming.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/triage-incoming.yml diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml new file mode 100644 index 0000000000..8467970128 --- /dev/null +++ b/.github/workflows/triage-incoming.yml @@ -0,0 +1,28 @@ +name: Move new issues into the issue triage board + +on: + issues: + types: [ opened ] + +jobs: + add_new_issues: + name: Add new issues to the triage board + runs-on: ubuntu-latest + steps: + - uses: octokit/graphql-action@v2.x + id: add_to_project + with: + headers: '{"GraphQL-Features": "projects_next_graphql"}' + query: | + mutation add_to_project($projectid:ID!,$contentid:ID!) 
{ + addProjectNextItem(input:{projectId:$projectid contentId:$contentid}) { + projectNextItem { + id + } + } + } + projectid: ${{ env.PROJECT_ID }} + contentid: ${{ github.event.issue.node_id }} + env: + PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ" + GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }} -- cgit 1.4.1 From be4250c7a888e314e361df42042bfa344ab65d55 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 24 Aug 2022 11:35:54 +0000 Subject: Add experimental configuration option to allow disabling legacy Prometheus metric names. (#13540) Co-authored-by: David Robertson --- changelog.d/13540.misc | 1 + synapse/app/_base.py | 39 ++++- synapse/app/generic_worker.py | 6 +- synapse/app/homeserver.py | 6 +- synapse/config/metrics.py | 29 ++++ synapse/metrics/__init__.py | 4 +- synapse/metrics/_exposition.py | 262 ------------------------------- synapse/metrics/_legacy_exposition.py | 284 ++++++++++++++++++++++++++++++++++ synapse/util/caches/__init__.py | 16 +- tests/test_metrics.py | 36 +++++ 10 files changed, 406 insertions(+), 277 deletions(-) create mode 100644 changelog.d/13540.misc delete mode 100644 synapse/metrics/_exposition.py create mode 100644 synapse/metrics/_legacy_exposition.py diff --git a/changelog.d/13540.misc b/changelog.d/13540.misc new file mode 100644 index 0000000000..07ace50b12 --- /dev/null +++ b/changelog.d/13540.misc @@ -0,0 +1 @@ +Add experimental configuration option to allow disabling legacy Prometheus metric names. \ No newline at end of file diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 923891ae0d..4742435d3b 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -266,15 +266,48 @@ def register_start( reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper())) -def listen_metrics(bind_addresses: Iterable[str], port: int) -> None: +def listen_metrics( + bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool +) -> None: """ Start Prometheus metrics server. """ - from synapse.metrics import RegistryProxy, start_http_server + from prometheus_client import start_http_server as start_http_server_prometheus + + from synapse.metrics import ( + RegistryProxy, + start_http_server as start_http_server_legacy, + ) for host in bind_addresses: logger.info("Starting metrics listener on %s:%d", host, port) - start_http_server(port, addr=host, registry=RegistryProxy) + if enable_legacy_metric_names: + start_http_server_legacy(port, addr=host, registry=RegistryProxy) + else: + _set_prometheus_client_use_created_metrics(False) + start_http_server_prometheus(port, addr=host, registry=RegistryProxy) + + +def _set_prometheus_client_use_created_metrics(new_value: bool) -> None: + """ + Sets whether prometheus_client should expose `_created`-suffixed metrics for + all gauges, histograms and summaries. + There is no programmatic way to disable this without poking at internals; + the proper way is to use an environment variable which prometheus_client + loads at import time. + + The motivation for disabling these `_created` metrics is that they're + a waste of space as they're not useful but they take up space in Prometheus. 
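As an aside on the "proper way" mentioned in the docstring above: recent versions of prometheus_client read an environment variable at import time, so a process that can set its environment before the first import does not need to poke at internals at all. A sketch, assuming prometheus_client >= 0.14 and that `PROMETHEUS_DISABLE_CREATED_SERIES` is still the variable it consults:

```python
import os

# Must be set before prometheus_client is first imported anywhere in the
# process; the flag is read once, at import time.
os.environ["PROMETHEUS_DISABLE_CREATED_SERIES"] = "True"

from prometheus_client import Counter, generate_latest  # noqa: E402

events = Counter("demo_events_total", "Toy counter for illustration")
events.inc()

# With the flag set, the scrape output has a demo_events_total sample but no
# demo_events_created sample.
print(generate_latest().decode("utf-8"))
```

Synapse cannot rely on that route for already-running deployments, which is why the code above flips the `_use_created` attribute directly and logs an error if the attribute ever disappears.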
+ """ + + import prometheus_client.metrics + + if hasattr(prometheus_client.metrics, "_use_created"): + prometheus_client.metrics._use_created = new_value + else: + logger.error( + "Can't disable `_created` metrics in prometheus_client (brittle hack broken?)" + ) def listen_manhole( diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 30e21d9707..5e3825fca6 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -412,7 +412,11 @@ class GenericWorkerServer(HomeServer): "enable_metrics is not True!" ) else: - _base.listen_metrics(listener.bind_addresses, listener.port) + _base.listen_metrics( + listener.bind_addresses, + listener.port, + enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics, + ) else: logger.warning("Unsupported listener type: %s", listener.type) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 68993d91a9..e57a926032 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -307,7 +307,11 @@ class SynapseHomeServer(HomeServer): "enable_metrics is not True!" ) else: - _base.listen_metrics(listener.bind_addresses, listener.port) + _base.listen_metrics( + listener.bind_addresses, + listener.port, + enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics, + ) else: # this shouldn't happen, as the listener type should have been checked # during parsing diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 3b42be5b5b..f3134834e5 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -42,6 +42,35 @@ class MetricsConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.enable_metrics = config.get("enable_metrics", False) + + """ + ### `enable_legacy_metrics` (experimental) + + **Experimental: this option may be removed or have its behaviour + changed at any time, with no notice.** + + Set to `true` to publish both legacy and non-legacy Prometheus metric names, + or to `false` to only publish non-legacy Prometheus metric names. + Defaults to `true`. Has no effect if `enable_metrics` is `false`. + + Legacy metric names include: + - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules; + - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard. + + These legacy metric names are unconventional and not compliant with OpenMetrics standards. + They are included for backwards compatibility. + + Example configuration: + ```yaml + enable_legacy_metrics: false + ``` + + See https://github.com/matrix-org/synapse/issues/11106 for context. + + *Since v1.67.0.* + """ + self.enable_legacy_metrics = config.get("enable_legacy_metrics", True) + self.report_stats = config.get("report_stats", None) self.report_stats_endpoint = config.get( "report_stats_endpoint", "https://matrix.org/report-usage-stats/push" diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 496fce2ecc..c3d3daf877 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -46,12 +46,12 @@ from twisted.python.threadpool import ThreadPool # This module is imported for its side effects; flake8 needn't warn that it's unused. 
import synapse.metrics._reactor_metrics # noqa: F401 -from synapse.metrics._exposition import ( +from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager +from synapse.metrics._legacy_exposition import ( MetricsResource, generate_latest, start_http_server, ) -from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.metrics._types import Collector from synapse.util import SYNAPSE_VERSION diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py deleted file mode 100644 index 353d0a63b6..0000000000 --- a/synapse/metrics/_exposition.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2015-2019 Prometheus Python Client Developers -# Copyright 2019 Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This code is based off `prometheus_client/exposition.py` from version 0.7.1. - -Due to the renaming of metrics in prometheus_client 0.4.0, this customised -vendoring of the code will emit both the old versions that Synapse dashboards -expect, and the newer "best practice" version of the up-to-date official client. -""" - -import math -import threading -from http.server import BaseHTTPRequestHandler, HTTPServer -from socketserver import ThreadingMixIn -from typing import Any, Dict, List, Type, Union -from urllib.parse import parse_qs, urlparse - -from prometheus_client import REGISTRY, CollectorRegistry -from prometheus_client.core import Sample - -from twisted.web.resource import Resource -from twisted.web.server import Request - -from synapse.util import caches - -CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8" - - -def floatToGoString(d: Union[int, float]) -> str: - d = float(d) - if d == math.inf: - return "+Inf" - elif d == -math.inf: - return "-Inf" - elif math.isnan(d): - return "NaN" - else: - s = repr(d) - dot = s.find(".") - # Go switches to exponents sooner than Python. - # We only need to care about positive values for le/quantile. - if d > 0 and dot > 6: - mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.") - return f"{mantissa}e+0{dot - 1}" - return s - - -def sample_line(line: Sample, name: str) -> str: - if line.labels: - labelstr = "{{{0}}}".format( - ",".join( - [ - '{}="{}"'.format( - k, - v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""), - ) - for k, v in sorted(line.labels.items()) - ] - ) - ) - else: - labelstr = "" - timestamp = "" - if line.timestamp is not None: - # Convert to milliseconds. - timestamp = f" {int(float(line.timestamp) * 1000):d}" - return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp) - - -def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes: - - # Trigger the cache metrics to be rescraped, which updates the common - # metrics but do not produce metrics themselves - for collector in caches.collectors_by_name.values(): - collector.collect() - - output = [] - - for metric in registry.collect(): - if not metric.samples: - # No samples, don't bother. 
- continue - - mname = metric.name - mnewname = metric.name - mtype = metric.type - - # OpenMetrics -> Prometheus - if mtype == "counter": - mnewname = mnewname + "_total" - elif mtype == "info": - mtype = "gauge" - mnewname = mnewname + "_info" - elif mtype == "stateset": - mtype = "gauge" - elif mtype == "gaugehistogram": - mtype = "histogram" - elif mtype == "unknown": - mtype = "untyped" - - # Output in the old format for compatibility. - if emit_help: - output.append( - "# HELP {} {}\n".format( - mname, - metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), - ) - ) - output.append(f"# TYPE {mname} {mtype}\n") - - om_samples: Dict[str, List[str]] = {} - for s in metric.samples: - for suffix in ["_created", "_gsum", "_gcount"]: - if s.name == metric.name + suffix: - # OpenMetrics specific sample, put in a gauge at the end. - # (these come from gaugehistograms which don't get renamed, - # so no need to faff with mnewname) - om_samples.setdefault(suffix, []).append(sample_line(s, s.name)) - break - else: - newname = s.name.replace(mnewname, mname) - if ":" in newname and newname.endswith("_total"): - newname = newname[: -len("_total")] - output.append(sample_line(s, newname)) - - for suffix, lines in sorted(om_samples.items()): - if emit_help: - output.append( - "# HELP {}{} {}\n".format( - metric.name, - suffix, - metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), - ) - ) - output.append(f"# TYPE {metric.name}{suffix} gauge\n") - output.extend(lines) - - # Get rid of the weird colon things while we're at it - if mtype == "counter": - mnewname = mnewname.replace(":total", "") - mnewname = mnewname.replace(":", "_") - - if mname == mnewname: - continue - - # Also output in the new format, if it's different. - if emit_help: - output.append( - "# HELP {} {}\n".format( - mnewname, - metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), - ) - ) - output.append(f"# TYPE {mnewname} {mtype}\n") - - for s in metric.samples: - # Get rid of the OpenMetrics specific samples (we should already have - # dealt with them above anyway.) - for suffix in ["_created", "_gsum", "_gcount"]: - if s.name == metric.name + suffix: - break - else: - output.append( - sample_line(s, s.name.replace(":total", "").replace(":", "_")) - ) - - return "".join(output).encode("utf-8") - - -class MetricsHandler(BaseHTTPRequestHandler): - """HTTP handler that gives metrics from ``REGISTRY``.""" - - registry = REGISTRY - - def do_GET(self) -> None: - registry = self.registry - params = parse_qs(urlparse(self.path).query) - - if "help" in params: - emit_help = True - else: - emit_help = False - - try: - output = generate_latest(registry, emit_help=emit_help) - except Exception: - self.send_error(500, "error generating metric output") - raise - self.send_response(200) - self.send_header("Content-Type", CONTENT_TYPE_LATEST) - self.send_header("Content-Length", str(len(output))) - self.end_headers() - self.wfile.write(output) - - def log_message(self, format: str, *args: Any) -> None: - """Log nothing.""" - - @classmethod - def factory(cls, registry: CollectorRegistry) -> Type: - """Returns a dynamic MetricsHandler class tied - to the passed registry. - """ - # This implementation relies on MetricsHandler.registry - # (defined above and defaulted to REGISTRY). - - # As we have unicode_literals, we need to create a str() - # object for type(). 
- cls_name = str(cls.__name__) - MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry}) - return MyMetricsHandler - - -class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer): - """Thread per request HTTP server.""" - - # Make worker threads "fire and forget". Beginning with Python 3.7 this - # prevents a memory leak because ``ThreadingMixIn`` starts to gather all - # non-daemon threads in a list in order to join on them at server close. - # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the - # same as Python 3.7's ``ThreadingHTTPServer``. - daemon_threads = True - - -def start_http_server( - port: int, addr: str = "", registry: CollectorRegistry = REGISTRY -) -> None: - """Starts an HTTP server for prometheus metrics as a daemon thread""" - CustomMetricsHandler = MetricsHandler.factory(registry) - httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler) - t = threading.Thread(target=httpd.serve_forever) - t.daemon = True - t.start() - - -class MetricsResource(Resource): - """ - Twisted ``Resource`` that serves prometheus metrics. - """ - - isLeaf = True - - def __init__(self, registry: CollectorRegistry = REGISTRY): - self.registry = registry - - def render_GET(self, request: Request) -> bytes: - request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii")) - response = generate_latest(self.registry) - request.setHeader(b"Content-Length", str(len(response))) - return response diff --git a/synapse/metrics/_legacy_exposition.py b/synapse/metrics/_legacy_exposition.py new file mode 100644 index 0000000000..ff640a49af --- /dev/null +++ b/synapse/metrics/_legacy_exposition.py @@ -0,0 +1,284 @@ +# Copyright 2015-2019 Prometheus Python Client Developers +# Copyright 2019 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This code is based off `prometheus_client/exposition.py` from version 0.7.1. + +Due to the renaming of metrics in prometheus_client 0.4.0, this customised +vendoring of the code will emit both the old versions that Synapse dashboards +expect, and the newer "best practice" version of the up-to-date official client. +""" + +import math +import threading +from http.server import BaseHTTPRequestHandler, HTTPServer +from socketserver import ThreadingMixIn +from typing import Any, Dict, List, Type, Union +from urllib.parse import parse_qs, urlparse + +from prometheus_client import REGISTRY, CollectorRegistry +from prometheus_client.core import Sample + +from twisted.web.resource import Resource +from twisted.web.server import Request + +from synapse.util import caches + +CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8" + + +def floatToGoString(d: Union[int, float]) -> str: + d = float(d) + if d == math.inf: + return "+Inf" + elif d == -math.inf: + return "-Inf" + elif math.isnan(d): + return "NaN" + else: + s = repr(d) + dot = s.find(".") + # Go switches to exponents sooner than Python. + # We only need to care about positive values for le/quantile. 
+ if d > 0 and dot > 6: + mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.") + return f"{mantissa}e+0{dot - 1}" + return s + + +def sample_line(line: Sample, name: str) -> str: + if line.labels: + labelstr = "{{{0}}}".format( + ",".join( + [ + '{}="{}"'.format( + k, + v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""), + ) + for k, v in sorted(line.labels.items()) + ] + ) + ) + else: + labelstr = "" + timestamp = "" + if line.timestamp is not None: + # Convert to milliseconds. + timestamp = f" {int(float(line.timestamp) * 1000):d}" + return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp) + + +# Mapping from new metric names to legacy metric names. +# We translate these back to their old names when exposing them through our +# legacy vendored exporter. +# Only this legacy exposition module applies these name changes. +LEGACY_METRIC_NAMES = { + "synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits", + "synapse_util_caches_cache_size": "synapse_util_caches_cache:size", + "synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size", + "synapse_util_caches_cache_total": "synapse_util_caches_cache:total", + "synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size", + "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits", + "synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size", + "synapse_util_caches_response_cache_total": "synapse_util_caches_response_cache:total", +} + + +def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes: + """ + Generate metrics in legacy format. Modern metrics are generated directly + by prometheus-client. + """ + + # Trigger the cache metrics to be rescraped, which updates the common + # metrics but do not produce metrics themselves + for collector in caches.collectors_by_name.values(): + collector.collect() + + output = [] + + for metric in registry.collect(): + if not metric.samples: + # No samples, don't bother. + continue + + # Translate to legacy metric name if it has one. + mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name) + mnewname = metric.name + mtype = metric.type + + # OpenMetrics -> Prometheus + if mtype == "counter": + mnewname = mnewname + "_total" + elif mtype == "info": + mtype = "gauge" + mnewname = mnewname + "_info" + elif mtype == "stateset": + mtype = "gauge" + elif mtype == "gaugehistogram": + mtype = "histogram" + elif mtype == "unknown": + mtype = "untyped" + + # Output in the old format for compatibility. + if emit_help: + output.append( + "# HELP {} {}\n".format( + mname, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append(f"# TYPE {mname} {mtype}\n") + + om_samples: Dict[str, List[str]] = {} + for s in metric.samples: + for suffix in ["_created", "_gsum", "_gcount"]: + if s.name == mname + suffix: + # OpenMetrics specific sample, put in a gauge at the end. 
+ # (these come from gaugehistograms which don't get renamed, + # so no need to faff with mnewname) + om_samples.setdefault(suffix, []).append(sample_line(s, s.name)) + break + else: + newname = s.name.replace(mnewname, mname) + if ":" in newname and newname.endswith("_total"): + newname = newname[: -len("_total")] + output.append(sample_line(s, newname)) + + for suffix, lines in sorted(om_samples.items()): + if emit_help: + output.append( + "# HELP {}{} {}\n".format( + mname, + suffix, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append(f"# TYPE {mname}{suffix} gauge\n") + output.extend(lines) + + # Get rid of the weird colon things while we're at it + if mtype == "counter": + mnewname = mnewname.replace(":total", "") + mnewname = mnewname.replace(":", "_") + + if mname == mnewname: + continue + + # Also output in the new format, if it's different. + if emit_help: + output.append( + "# HELP {} {}\n".format( + mnewname, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append(f"# TYPE {mnewname} {mtype}\n") + + for s in metric.samples: + # Get rid of the OpenMetrics specific samples (we should already have + # dealt with them above anyway.) + for suffix in ["_created", "_gsum", "_gcount"]: + if s.name == mname + suffix: + break + else: + sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name) + output.append( + sample_line(s, sample_name.replace(":total", "").replace(":", "_")) + ) + + return "".join(output).encode("utf-8") + + +class MetricsHandler(BaseHTTPRequestHandler): + """HTTP handler that gives metrics from ``REGISTRY``.""" + + registry = REGISTRY + + def do_GET(self) -> None: + registry = self.registry + params = parse_qs(urlparse(self.path).query) + + if "help" in params: + emit_help = True + else: + emit_help = False + + try: + output = generate_latest(registry, emit_help=emit_help) + except Exception: + self.send_error(500, "error generating metric output") + raise + self.send_response(200) + self.send_header("Content-Type", CONTENT_TYPE_LATEST) + self.send_header("Content-Length", str(len(output))) + self.end_headers() + self.wfile.write(output) + + def log_message(self, format: str, *args: Any) -> None: + """Log nothing.""" + + @classmethod + def factory(cls, registry: CollectorRegistry) -> Type: + """Returns a dynamic MetricsHandler class tied + to the passed registry. + """ + # This implementation relies on MetricsHandler.registry + # (defined above and defaulted to REGISTRY). + + # As we have unicode_literals, we need to create a str() + # object for type(). + cls_name = str(cls.__name__) + MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry}) + return MyMetricsHandler + + +class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer): + """Thread per request HTTP server.""" + + # Make worker threads "fire and forget". Beginning with Python 3.7 this + # prevents a memory leak because ``ThreadingMixIn`` starts to gather all + # non-daemon threads in a list in order to join on them at server close. + # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the + # same as Python 3.7's ``ThreadingHTTPServer``. 
+ daemon_threads = True + + +def start_http_server( + port: int, addr: str = "", registry: CollectorRegistry = REGISTRY +) -> None: + """Starts an HTTP server for prometheus metrics as a daemon thread""" + CustomMetricsHandler = MetricsHandler.factory(registry) + httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler) + t = threading.Thread(target=httpd.serve_forever) + t.daemon = True + t.start() + + +class MetricsResource(Resource): + """ + Twisted ``Resource`` that serves prometheus metrics. + """ + + isLeaf = True + + def __init__(self, registry: CollectorRegistry = REGISTRY): + self.registry = registry + + def render_GET(self, request: Request) -> bytes: + request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii")) + response = generate_latest(self.registry) + request.setHeader(b"Content-Length", str(len(response))) + return response diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 42f6abb5e1..bdf9b0dc8c 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -34,10 +34,10 @@ TRACK_MEMORY_USAGE = False caches_by_name: Dict[str, Sized] = {} collectors_by_name: Dict[str, "CacheMetric"] = {} -cache_size = Gauge("synapse_util_caches_cache:size", "", ["name"]) -cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"]) -cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name", "reason"]) -cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) +cache_size = Gauge("synapse_util_caches_cache_size", "", ["name"]) +cache_hits = Gauge("synapse_util_caches_cache_hits", "", ["name"]) +cache_evicted = Gauge("synapse_util_caches_cache_evicted_size", "", ["name", "reason"]) +cache_total = Gauge("synapse_util_caches_cache_total", "", ["name"]) cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) cache_memory_usage = Gauge( "synapse_util_caches_cache_size_bytes", @@ -45,12 +45,12 @@ cache_memory_usage = Gauge( ["name"], ) -response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) -response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) +response_cache_size = Gauge("synapse_util_caches_response_cache_size", "", ["name"]) +response_cache_hits = Gauge("synapse_util_caches_response_cache_hits", "", ["name"]) response_cache_evicted = Gauge( - "synapse_util_caches_response_cache:evicted_size", "", ["name", "reason"] + "synapse_util_caches_response_cache_evicted_size", "", ["name", "reason"] ) -response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) +response_cache_total = Gauge("synapse_util_caches_response_cache_total", "", ["name"]) class EvictionReason(Enum): diff --git a/tests/test_metrics.py b/tests/test_metrics.py index b4574b2ffe..1a70eddc9b 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -12,7 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
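Taken together, the pieces of this commit fit as follows: the renames in `synapse/util/caches/__init__.py` above register metrics under colon-free names, `LEGACY_METRIC_NAMES` in `_legacy_exposition.py` maps those back to the colon-style names old dashboards expect, and `enable_legacy_metrics` selects whether the legacy exporter (and hence both spellings) is used. A small sketch of the resulting behaviour, with the name pairs copied from the diff and a hypothetical helper:

```python
from typing import Set

# Modern (registered) name on the left, legacy colon-style name on the right,
# as in LEGACY_METRIC_NAMES above.
LEGACY_METRIC_NAMES = {
    "synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
    "synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
    "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
}

def exposed_names(modern_name: str, legacy_enabled: bool) -> Set[str]:
    """Which names one underlying metric is scraped under (sketch only)."""
    names = {modern_name}
    if legacy_enabled:
        names.add(LEGACY_METRIC_NAMES.get(modern_name, modern_name))
    return names

assert exposed_names("synapse_util_caches_cache_hits", legacy_enabled=True) == {
    "synapse_util_caches_cache_hits",
    "synapse_util_caches_cache:hits",
}
assert exposed_names("synapse_util_caches_cache_hits", legacy_enabled=False) == {
    "synapse_util_caches_cache_hits"
}
```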
+try: + from importlib import metadata +except ImportError: + import importlib_metadata as metadata # type: ignore[no-redef] +from unittest.mock import patch + +from pkg_resources import parse_version + +from synapse.app._base import _set_prometheus_client_use_created_metrics from synapse.metrics import REGISTRY, InFlightGauge, generate_latest from synapse.util.caches.deferred_cache import DeferredCache @@ -162,3 +171,30 @@ class CacheMetricsTests(unittest.HomeserverTestCase): self.assertEqual(items["synapse_util_caches_cache_size"], "1.0") self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0") + + +class PrometheusMetricsHackTestCase(unittest.HomeserverTestCase): + if parse_version(metadata.version("prometheus_client")) < parse_version("0.14.0"): + skip = "prometheus-client too old" + + def test_created_metrics_disabled(self) -> None: + """ + Tests that a brittle hack, to disable `_created` metrics, works. + This involves poking at the internals of prometheus-client. + It's not the end of the world if this doesn't work. + + This test gives us a way to notice if prometheus-client changes + their internals. + """ + import prometheus_client.metrics + + PRIVATE_FLAG_NAME = "_use_created" + + # By default, the pesky `_created` metrics are enabled. + # Check this assumption is still valid. + self.assertTrue(getattr(prometheus_client.metrics, PRIVATE_FLAG_NAME)) + + with patch("prometheus_client.metrics") as mock: + setattr(mock, PRIVATE_FLAG_NAME, True) + _set_prometheus_client_use_created_metrics(False) + self.assertFalse(getattr(mock, PRIVATE_FLAG_NAME, False)) -- cgit 1.4.1 From 371db86a86f8876939449d5553726e69e623c898 Mon Sep 17 00:00:00 2001 From: Andy Balaam Date: Wed, 24 Aug 2022 13:59:33 +0100 Subject: First draft of triage_labelled action (#13612) --- .github/workflows/triage_labelled.yml | 44 +++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 .github/workflows/triage_labelled.yml diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml new file mode 100644 index 0000000000..fbd55de17f --- /dev/null +++ b/.github/workflows/triage_labelled.yml @@ -0,0 +1,44 @@ +name: Move labelled issues to correct projects + +on: + issues: + types: [ labeled ] + +jobs: + move_needs_info: + name: Move X-Needs-Info on the triage board + runs-on: ubuntu-latest + if: > + contains(github.event.issue.labels.*.name, 'X-Needs-Info') + steps: + - uses: octokit/graphql-action@v2.x + id: add_to_project + with: + headers: '{"GraphQL-Features": "projects_next_graphql"}' + query: | + mutation { + updateProjectV2ItemFieldValue( + input: { + projectId: $projectid + itemId: $contentid + fieldId: $fieldid + value: { + singleSelectOptionId: "Todo" + } + } + ) { + projectV2Item { + id + } + } + } + + projectid: ${{ env.PROJECT_ID }} + contentid: ${{ github.event.issue.node_id }} + fieldid: ${{ env.FIELD_ID }} + optionid: ${{ env.OPTION_ID }} + env: + PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ" + GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }} + FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" + OPTION_ID: "ba22e43c" -- cgit 1.4.1 From c807b814ae78b6adb5db6485a3ba7de79e36c826 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Aug 2022 11:14:28 -0500 Subject: Use dedicated `get_local_users_in_room` to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request (#13606) Use dedicated `get_local_users_in_room` to find local users when calculating `join_authorised_via_users_server` ("the authorising user for 
joining a restricted room") of a `/make_join` request. Found while working on https://github.com/matrix-org/synapse/pull/13575#discussion_r953023755 but it's not related. --- changelog.d/13606.misc | 1 + synapse/handlers/event_auth.py | 9 +++------ 2 files changed, 4 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13606.misc diff --git a/changelog.d/13606.misc b/changelog.d/13606.misc new file mode 100644 index 0000000000..58a4467798 --- /dev/null +++ b/changelog.d/13606.misc @@ -0,0 +1 @@ +Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index a2dd9c7efa..c3ddc5d182 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -129,12 +129,9 @@ class EventAuthHandler: else: users = {} - # Find the user with the highest power level. - users_in_room = await self._store.get_users_in_room(room_id) - # Only interested in local users. - local_users_in_room = [ - u for u in users_in_room if get_domain_from_id(u) == self._server_name - ] + # Find the user with the highest power level (only interested in local + # users). + local_users_in_room = await self._store.get_local_users_in_room(room_id) chosen_user = max( local_users_in_room, key=lambda user: users.get(user, users_default_level), -- cgit 1.4.1 From b93bd95e8ab64d27ae26841020f62ee61272a5f2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Aug 2022 12:53:46 -0500 Subject: When loading current ids, sort by `stream_id` to avoid an incorrect overwrite and to avoid errors caused by sorting alphabetically by instance name, which can be `null` (#13585) When loading current ids, sort by stream ID so that we don't overwrite the `current_position` of an instance with a lower stream ID than the one we're actually at ([discussion](https://github.com/matrix-org/synapse/pull/13585#discussion_r951795379)). Previously, it sorted alphabetically by instance name, which can be `null` and throw errors; more importantly, it accomplished nothing. Fixes the following startup error, which is why I started looking into this area: ``` $ poetry run synapse_homeserver --config-path homeserver.yaml **************************************************************** Error during initialisation: '<' not supported between instances of 'NoneType' and 'str' There may be more information in the logs. **************************************************************** ``` Somehow my database ended up looking like the following; notice the `instance_name` is `null` in the db, and we can't sort `NoneType` things. Another question: why do we see the `instance_name` as `null` sometimes instead of `master` in monolith mode?
``` $ psql synapse synapse=# SELECT * FROM stream_positions; stream_name | instance_name | stream_id -----------------+---------------+----------- account_data | master | 1242 events | master | 1787 to_device | master | 58 presence_stream | master | 485638 receipts | master | 341 backfill | master | -139106 (6 rows) synapse=# SELECT instance_name, stream_id FROM receipts_linearized; instance_name | stream_id ---------------+----------- | 211 | 3 | 4 | 212 | 213 | 224 | 228 | 164 | 313 | 253 | 38 | 321 | 324 | 189 | 192 | 193 | 194 | 195 | 197 | 198 | 275 | 79 | 339 | 340 | 82 | 341 | 84 | 85 | 91 | 119 ``` --- changelog.d/13585.bugfix | 1 + synapse/storage/util/id_generators.py | 13 +++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13585.bugfix diff --git a/changelog.d/13585.bugfix b/changelog.d/13585.bugfix new file mode 100644 index 0000000000..664b986c59 --- /dev/null +++ b/changelog.d/13585.bugfix @@ -0,0 +1 @@ +Fix loading the current stream position behind the actual position. diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 3c13859faa..2dfe4c0b66 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -460,8 +460,17 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): # Cast safety: this corresponds to the types returned by the query above. rows.extend(cast(Iterable[Tuple[str, int]], cur)) - # Sort so that we handle rows in order for each instance. - rows.sort() + # Sort by stream_id (ascending, lowest -> highest) so that we handle + # rows in order for each instance because we don't want to overwrite + # the current_position of an instance to a lower stream ID than + # we're actually at. + def sort_by_stream_id_key_func(row: Tuple[str, int]) -> int: + (instance, stream_id) = row + # If `stream_id` is ever `None`, we will see a `TypeError: '<' + # not supported between instances of 'NoneType' and 'X'` error. + return stream_id + + rows.sort(key=sort_by_stream_id_key_func) with self._lock: for ( -- cgit 1.4.1 From d58615c82cec5bd866bedcb33e3e2a5d2a961c44 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Aug 2022 14:13:12 -0500 Subject: Directly lookup local membership instead of getting all members in a room first (`get_users_in_room` mis-use) (#13608) See https://github.com/matrix-org/synapse/pull/13575#discussion_r953023755 --- changelog.d/13608.misc | 1 + synapse/handlers/events.py | 9 +++++--- synapse/handlers/message.py | 6 ++++-- synapse/handlers/room.py | 7 +++++-- synapse/handlers/room_member.py | 6 ++++-- synapse/server_notices/server_notices_manager.py | 10 +++++++-- synapse/storage/databases/main/roommember.py | 26 ++++++++++++++++++++++++ tests/rest/client/test_relations.py | 12 +++++------ 8 files changed, 60 insertions(+), 17 deletions(-) create mode 100644 changelog.d/13608.misc diff --git a/changelog.d/13608.misc b/changelog.d/13608.misc new file mode 100644 index 0000000000..19bcc45e33 --- /dev/null +++ b/changelog.d/13608.misc @@ -0,0 +1 @@ +Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index ac13340d3a..949b69cb41 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -151,7 +151,7 @@ class EventHandler: """Retrieve a single specified event. 
Args: - user: The user requesting the event + user: The local user requesting the event room_id: The expected room id. We'll return None if the event's room does not match. event_id: The event ID to obtain. @@ -173,8 +173,11 @@ class EventHandler: if not event: return None - users = await self.store.get_users_in_room(event.room_id) - is_peeking = user.to_string() not in users + is_user_in_room = await self.store.check_local_user_in_room( + user_id=user.to_string(), room_id=event.room_id + ) + # The user is peeking if they aren't in the room already + is_peeking = not is_user_in_room filtered = await filter_events_for_client( self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index acd3de06f6..72157d5a36 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -761,8 +761,10 @@ class EventCreationHandler: async def _is_server_notices_room(self, room_id: str) -> bool: if self.config.servernotices.server_notices_mxid is None: return False - user_ids = await self.store.get_users_in_room(room_id) - return self.config.servernotices.server_notices_mxid in user_ids + is_server_notices_room = await self.store.check_local_user_in_room( + user_id=self.config.servernotices.server_notices_mxid, room_id=room_id + ) + return is_server_notices_room async def assert_accepted_privacy_policy(self, requester: Requester) -> None: """Check if a user has accepted the privacy policy diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2bf0ebd025..2fc8264858 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1284,8 +1284,11 @@ class RoomContextHandler: before_limit = math.floor(limit / 2.0) after_limit = limit - before_limit - users = await self.store.get_users_in_room(room_id) - is_peeking = user.to_string() not in users + is_user_in_room = await self.store.check_local_user_in_room( + user_id=user.to_string(), room_id=room_id + ) + # The user is peeking if they aren't in the room already + is_peeking = not is_user_in_room async def filter_evts(events: List[EventBase]) -> List[EventBase]: if use_admin_priviledge: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 65b9a655d4..709682622f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1620,8 +1620,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def _is_server_notice_room(self, room_id: str) -> bool: if self._server_notices_mxid is None: return False - user_ids = await self.store.get_users_in_room(room_id) - return self._server_notices_mxid in user_ids + is_server_notices_room = await self.store.check_local_user_in_room( + user_id=self._server_notices_mxid, room_id=room_id + ) + return is_server_notices_room class RoomMemberMasterHandler(RoomMemberHandler): diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 70d054a8f4..564e3705c2 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -102,6 +102,10 @@ class ServerNoticesManager: Returns: The room's ID, or None if no room could be found. """ + # If there is no server notices MXID, then there is no server notices room + if self.server_notices_mxid is None: + return None + rooms = await self._store.get_rooms_for_local_user_where_membership_is( user_id, [Membership.INVITE, Membership.JOIN] ) @@ -111,8 +115,10 @@ class ServerNoticesManager: # be joined. 
This is kinda deliberate, in that if somebody somehow # manages to invite the system user to a room, that doesn't make it # the server notices room. - user_ids = await self._store.get_users_in_room(room.room_id) - if len(user_ids) <= 2 and self.server_notices_mxid in user_ids: + is_server_notices_room = await self._store.check_local_user_in_room( + user_id=self.server_notices_mxid, room_id=room.room_id + ) + if is_server_notices_room: # we found a room which our user shares with the system notice # user return room.room_id diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 046ad3a11c..9e5034b401 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -534,6 +534,32 @@ class RoomMemberWorkerStore(EventsWorkerStore): desc="get_local_users_in_room", ) + async def check_local_user_in_room(self, user_id: str, room_id: str) -> bool: + """ + Check whether a given local user is currently joined to the given room. + + Returns: + A boolean indicating whether the user is currently joined to the room + + Raises: + Exception when called with a non-local user to this homeserver + """ + if not self.hs.is_mine_id(user_id): + raise Exception( + "Cannot call 'check_local_user_in_room' on " + "non-local user %s" % (user_id,), + ) + + ( + membership, + member_event_id, + ) = await self.get_local_current_membership_for_user_in_room( + user_id=user_id, + room_id=room_id, + ) + + return membership == Membership.JOIN + async def get_local_current_membership_for_user_in_room( self, user_id: str, room_id: str ) -> Tuple[Optional[str], Optional[str]]: diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index d589f07314..651f4f415d 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -999,7 +999,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): bundled_aggregations, ) - self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 6) + self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7) def test_annotation_to_annotation(self) -> None: """Any relation to an annotation should be ignored.""" @@ -1035,7 +1035,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): bundled_aggregations, ) - self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6) + self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7) def test_thread(self) -> None: """ @@ -1080,21 +1080,21 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): # The "user" sent the root event and is making queries for the bundled # aggregations: they have participated. - self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 8) + self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 9) # The "user2" sent replies in the thread and is making queries for the # bundled aggregations: they have participated. # # Note that this re-uses some cached values, so the total number of # queries is much smaller. self._test_bundled_aggregations( - RelationTypes.THREAD, _gen_assert(True), 2, access_token=self.user2_token + RelationTypes.THREAD, _gen_assert(True), 3, access_token=self.user2_token ) # A user with no interactions with the thread: they have not participated.
user3_id, user3_token = self._create_user("charlie") self.helper.join(self.room, user=user3_id, tok=user3_token) self._test_bundled_aggregations( - RelationTypes.THREAD, _gen_assert(False), 2, access_token=user3_token + RelationTypes.THREAD, _gen_assert(False), 3, access_token=user3_token ) def test_thread_with_bundled_aggregations_for_latest(self) -> None: @@ -1142,7 +1142,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): bundled_aggregations["latest_event"].get("unsigned"), ) - self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 8) + self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9) def test_nested_thread(self) -> None: """ -- cgit 1.4.1 From 1a209efdb2a6c51e52dd277de7581099852877ae Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Aug 2022 14:15:37 -0500 Subject: Update `get_users_in_room` mis-use to get hosts with dedicated `get_current_hosts_in_room` (#13605) See https://github.com/matrix-org/synapse/pull/13575#discussion_r953023755 --- changelog.d/13605.misc | 1 + synapse/handlers/device.py | 8 ++++++-- synapse/handlers/directory.py | 12 +++++++----- synapse/handlers/presence.py | 3 +-- synapse/handlers/typing.py | 7 ++++--- tests/federation/test_federation_sender.py | 17 ++++++++++++----- 6 files changed, 31 insertions(+), 17 deletions(-) create mode 100644 changelog.d/13605.misc diff --git a/changelog.d/13605.misc b/changelog.d/13605.misc new file mode 100644 index 0000000000..88d518383b --- /dev/null +++ b/changelog.d/13605.misc @@ -0,0 +1 @@ +Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index f5c586f657..9c2c3a0e68 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -310,6 +310,7 @@ class DeviceHandler(DeviceWorkerHandler): super().__init__(hs) self.federation_sender = hs.get_federation_sender() + self._storage_controllers = hs.get_storage_controllers() self.device_list_updater = DeviceListUpdater(hs, self) @@ -694,8 +695,11 @@ class DeviceHandler(DeviceWorkerHandler): # Ignore any users that aren't ours if self.hs.is_mine_id(user_id): - joined_user_ids = await self.store.get_users_in_room(room_id) - hosts = {get_domain_from_id(u) for u in joined_user_ids} + hosts = set( + await self._storage_controllers.state.get_current_hosts_in_room( + room_id + ) + ) hosts.discard(self.server_name) # Check if we've already sent this update to some hosts diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 948f66a94d..7127d5aefc 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -30,7 +30,7 @@ from synapse.api.errors import ( from synapse.appservice import ApplicationService from synapse.module_api import NOT_SPAM from synapse.storage.databases.main.directory import RoomAliasMapping -from synapse.types import JsonDict, Requester, RoomAlias, get_domain_from_id +from synapse.types import JsonDict, Requester, RoomAlias if TYPE_CHECKING: from synapse.server import HomeServer @@ -83,8 +83,9 @@ class DirectoryHandler: # TODO(erikj): Add transactions. # TODO(erikj): Check if there is a current association. 
if not servers: - users = await self.store.get_users_in_room(room_id) - servers = {get_domain_from_id(u) for u in users} + servers = await self._storage_controllers.state.get_current_hosts_in_room( + room_id + ) if not servers: raise SynapseError(400, "Failed to get server list") @@ -287,8 +288,9 @@ class DirectoryHandler: Codes.NOT_FOUND, ) - users = await self.store.get_users_in_room(room_id) - extra_servers = {get_domain_from_id(u) for u in users} + extra_servers = await self._storage_controllers.state.get_current_hosts_in_room( + room_id + ) servers_set = set(extra_servers) | set(servers) # If this server is in the list of servers, return it first. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 741504ba9f..4e575ffbaa 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -2051,8 +2051,7 @@ async def get_interested_remotes( ) for room_id, states in room_ids_to_states.items(): - user_ids = await store.get_users_in_room(room_id) - hosts = {get_domain_from_id(user_id) for user_id in user_ids} + hosts = await store.get_current_hosts_in_room(room_id) for host in hosts: hosts_and_states.setdefault(host, set()).update(states) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index bcac3372a2..a4cd8b8f0c 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -26,7 +26,7 @@ from synapse.metrics.background_process_metrics import ( ) from synapse.replication.tcp.streams import TypingStream from synapse.streams import EventSource -from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id +from synapse.types import JsonDict, Requester, StreamKeyType, UserID from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -362,8 +362,9 @@ class TypingWriterHandler(FollowerTypingHandler): ) return - users = await self.store.get_users_in_room(room_id) - domains = {get_domain_from_id(u) for u in users} + domains = await self._storage_controllers.state.get_current_hosts_in_room( + room_id + ) if self.server_name in domains: logger.info("Got typing update from %s: %r", user_id, content) diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 01a1db6115..a5aa500ef8 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -173,17 +173,24 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): return c def prepare(self, reactor, clock, hs): - # stub out `get_rooms_for_user` and `get_users_in_room` so that the + test_room_id = "!room:host1" + + # stub out `get_rooms_for_user` and `get_current_hosts_in_room` so that the + # server thinks the user shares a room with `@user2:host2` def get_rooms_for_user(user_id): - return defer.succeed({"!room:host1"}) + return defer.succeed({test_room_id}) hs.get_datastores().main.get_rooms_for_user = get_rooms_for_user - def get_users_in_room(room_id): - return defer.succeed({"@user2:host2"}) + async def get_current_hosts_in_room(room_id): + if room_id == test_room_id: + return ["host2"] + + # TODO: We should fail the test when we encounter an unexpected room ID. + # We can't just use `self.fail(...)` here because the app code is greedy + # with `Exception` and will catch it before the test can see it.
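Each call site in this commit replaces the same two-step pattern, fetch every joined user and then collapse the user IDs to server names, with one dedicated store lookup. The collapse step, sketched with a hand-rolled stand-in for `synapse.types.get_domain_from_id`:

```python
def get_domain_from_id(user_id: str) -> str:
    # Simplified stand-in: Matrix user IDs have the form "@localpart:domain".
    return user_id.split(":", 1)[1]

joined_users = ["@alice:host1", "@bob:host2", "@carol:host2"]
hosts = {get_domain_from_id(u) for u in joined_users}
assert hosts == {"host1", "host2"}

# After the refactor, callers ask the store for exactly this set instead:
#     hosts = await store.get_current_hosts_in_room(room_id)
# which avoids materialising the (possibly huge) joined-user list at all.
```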
- hs.get_datastores().main.get_users_in_room = get_users_in_room + hs.get_datastores().main.get_current_hosts_in_room = get_current_hosts_in_room # whenever send_transaction is called, record the edu data self.edus = [] -- cgit 1.4.1 From c406d50d2df3c04e695b826e11c79b3d6326b5ec Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 24 Aug 2022 21:06:31 +0100 Subject: Rename `event_map` to `unpersisted_events` (#13603) --- changelog.d/13603.misc | 1 + synapse/state/v2.py | 69 +++++++++++++++++++++++++++----------------------- 2 files changed, 38 insertions(+), 32 deletions(-) create mode 100644 changelog.d/13603.misc diff --git a/changelog.d/13603.misc b/changelog.d/13603.misc new file mode 100644 index 0000000000..d08eb6cc0a --- /dev/null +++ b/changelog.d/13603.misc @@ -0,0 +1 @@ +Rename `event_map` to `unpersisted_events` when computing the auth differences. diff --git a/synapse/state/v2.py b/synapse/state/v2.py index cf3045f82e..af03851c71 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -271,40 +271,41 @@ async def _get_power_level_for_sender( async def _get_auth_chain_difference( room_id: str, state_sets: Sequence[Mapping[Any, str]], - event_map: Dict[str, EventBase], + unpersisted_events: Dict[str, EventBase], state_res_store: StateResolutionStore, ) -> Set[str]: """Compare the auth chains of each state set and return the set of events - that only appear in some but not all of the auth chains. + that only appear in some, but not all, of the auth chains. Args: - state_sets - event_map - state_res_store + state_sets: The input state sets we are trying to resolve across. + unpersisted_events: A map from event ID to EventBase containing all unpersisted + events involved in this resolution. + state_res_store: Returns: - Set of event IDs + The auth difference of the given state sets, as a set of event IDs. """ # The `StateResolutionStore.get_auth_chain_difference` function assumes that # all events passed to it (and their auth chains) have been persisted - # previously. This is not the case for any events in the `event_map`, and so - # we need to manually handle those events. + # previously. We need to manually handle any other events that are yet to be + # persisted. # - # We do this by: - # 1. calculating the auth chain difference for the state sets based on the - # events in `event_map` alone - # 2. replacing any events in the state_sets that are also in `event_map` - # with their auth events (recursively), and then calling - # `store.get_auth_chain_difference` as normal - # 3. adding the results of 1 and 2 together. - - # Map from event ID in `event_map` to their auth event IDs, and their auth - # event IDs if they appear in the `event_map`. This is the intersection of - # the event's auth chain with the events in the `event_map` *plus* their + # We do this in three steps: + # 1. Compute the set of unpersisted events belonging to the auth difference. + # 2. Replace any unpersisted events in the state_sets with their auth events, + # recursively, until the state_sets contain only persisted events. + # Then we call `store.get_auth_chain_difference` as normal, which computes + # the set of persisted events belonging to the auth difference. + # 3. Add the results of 1 and 2 together. + + # Map from event ID in `unpersisted_events` to their auth event IDs, and their auth + # event IDs if they appear in the `unpersisted_events`. This is the intersection of + # the event's auth chain with the events in `unpersisted_events` *plus* their + # auth event IDs.
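To make step 1 of the comment above concrete: the contribution of unpersisted events to the auth difference is the union of the per-state-set unpersisted auth chains minus their intersection, exactly as computed further down in this hunk. A minimal, self-contained sketch (illustrative only, not the Synapse implementation):

```python
# Sketch of the set algebra behind step 1 (illustrative only): events that
# appear in some, but not all, of the unpersisted auth chains.
from typing import List, Set


def unpersisted_auth_difference(unpersisted_set_ids: List[Set[str]]) -> Set[str]:
    if not unpersisted_set_ids:
        return set()
    union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:])
    intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:])
    return union - intersection


# Two state sets whose unpersisted auth chains are {"$a", "$b"} and
# {"$b", "$c"}: only "$b" is common to both, so the difference is {"$a", "$c"}.
assert unpersisted_auth_difference([{"$a", "$b"}, {"$b", "$c"}]) == {"$a", "$c"}
```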
events_to_auth_chain: Dict[str, Set[str]] = {} - for event in event_map.values(): + for event in unpersisted_events.values(): chain = {event.event_id} events_to_auth_chain[event.event_id] = chain @@ -312,16 +313,16 @@ async def _get_auth_chain_difference( while to_search: for auth_id in to_search.pop().auth_event_ids(): chain.add(auth_id) - auth_event = event_map.get(auth_id) + auth_event = unpersisted_events.get(auth_id) if auth_event: to_search.append(auth_event) - # We now a) calculate the auth chain difference for the unpersisted events - # and b) work out the state sets to pass to the store. + # We now 1) calculate the auth chain difference for the unpersisted events + # and 2) work out the state sets to pass to the store. # - # Note: If the `event_map` is empty (which is the common case), we can do a + # Note: If there are no `unpersisted_events` (which is the common case), we can do a # much simpler calculation. - if event_map: + if unpersisted_events: # The list of state sets to pass to the store, where each state set is a set # of the event ids making up the state. This is similar to `state_sets`, # except that (a) we only have event ids, not the complete @@ -344,14 +345,18 @@ async def _get_auth_chain_difference( for event_id in state_set.values(): event_chain = events_to_auth_chain.get(event_id) if event_chain is not None: - # We have an event in `event_map`. We add all the auth - # events that it references (that aren't also in `event_map`). - set_ids.update(e for e in event_chain if e not in event_map) + # We have an unpersisted event. We add all the auth + # events that it references which have already been persisted. + set_ids.update( + e for e in event_chain if e not in unpersisted_events + ) # We also add the full chain of unpersisted event IDs # referenced by this state set, so that we can work out the # auth chain difference of the unpersisted events.
- unpersisted_ids.update(e for e in event_chain if e in event_map) + unpersisted_ids.update( + e for e in event_chain if e in unpersisted_events + ) else: set_ids.add(event_id) @@ -361,15 +366,15 @@ async def _get_auth_chain_difference( union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:]) intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:]) - difference_from_event_map: Collection[str] = union - intersection + auth_difference_unpersisted_part: Collection[str] = union - intersection else: - difference_from_event_map = () + auth_difference_unpersisted_part = () state_sets_ids = [set(state_set.values()) for state_set in state_sets] difference = await state_res_store.get_auth_chain_difference( room_id, state_sets_ids ) - difference.update(difference_from_event_map) + difference.update(auth_difference_unpersisted_part) return difference -- cgit 1.4.1 From 0bf180cbb43f6e3d489ddaa5d66226b6daeea027 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Aug 2022 18:59:27 -0500 Subject: Comment about a better future where we can get the state diff between two events (#13586) Split off from https://github.com/matrix-org/synapse/pull/13561 Part of https://github.com/matrix-org/synapse/issues/13356 Mentioned in [internal doc](https://docs.google.com/document/d/1lvUoVfYUiy6UaHB6Rb4HicjaJAU40-APue9Q4vzuW3c/edit#bookmark=id.2tvwz3yhcafh) --- changelog.d/13586.misc | 1 + synapse/handlers/federation_event.py | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/13586.misc diff --git a/changelog.d/13586.misc b/changelog.d/13586.misc new file mode 100644 index 0000000000..daa71470f6 --- /dev/null +++ b/changelog.d/13586.misc @@ -0,0 +1 @@ +Comment about a better future where we can get the state diff between two events. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 048c4111f6..ace7adcffb 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1041,6 +1041,14 @@ class FederationEventHandler: InvalidResponseError: if the remote homeserver's response contains fields of the wrong type. """ + + # It would be better if we could query the difference from our known + # state to the given `event_id` so the sending server doesn't have to + # send as much and we don't have to process as many events. For example + # in a room like #matrix:matrix.org, we get 200k events (77k state_events, 122k + # auth_events) from this call. + # + # Tracked by https://github.com/matrix-org/synapse/issues/13618 ( state_event_ids, auth_event_ids, -- cgit 1.4.1 From a2824465027c9036b98e56a52b53c80ca2aaa71b Mon Sep 17 00:00:00 2001 From: Kat Gerasimova Date: Thu, 25 Aug 2022 12:09:23 +0100 Subject: Update automation for incoming issues (#13629) GitHub appears to be deprecating addProjectNextItem by not allowing it to be used alongside projectV2 to get the project ID, so switching to using addProjectV2ItemById instead. --- .github/workflows/triage-incoming.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml index 8467970128..f926bcb759 100644 --- a/.github/workflows/triage-incoming.yml +++ b/.github/workflows/triage-incoming.yml @@ -14,13 +14,13 @@ jobs: with: headers: '{"GraphQL-Features": "projects_next_graphql"}' query: | - mutation add_to_project($projectid:ID!,$contentid:ID!) 
{ - addProjectNextItem(input:{projectId:$projectid contentId:$contentid}) { - projectNextItem { + mutation add_to_project($projectid:ID!,$contentid:ID!) { + addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) { + item { id + } } } - } projectid: ${{ env.PROJECT_ID }} contentid: ${{ github.event.issue.node_id }} env: -- cgit 1.4.1 From a2ce6144479c9f0400517d505c7288f2ab785a95 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 25 Aug 2022 15:29:08 +0100 Subject: register_new_matrix_user: read server url from config (#13616) Fixes https://github.com/matrix-org/synapse/issues/3672: `https://localhost:8448` is virtually never right. --- changelog.d/13616.bugfix | 1 + synapse/_scripts/register_new_matrix_user.py | 57 +++++++++++++++++++++++++--- 2 files changed, 52 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13616.bugfix diff --git a/changelog.d/13616.bugfix b/changelog.d/13616.bugfix new file mode 100644 index 0000000000..f2c48d1d8d --- /dev/null +++ b/changelog.d/13616.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 092601f530..42ae0dbea3 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -20,11 +20,13 @@ import hashlib import hmac import logging import sys -from typing import Callable, Optional +from typing import Any, Callable, Dict, Optional import requests import yaml +_DEFAULT_SERVER_URL = "http://localhost:8008" + def request_registration( user: str, @@ -203,31 +205,74 @@ def main() -> None: parser.add_argument( "server_url", - default="https://localhost:8448", nargs="?", - help="URL to use to talk to the homeserver. Defaults to " - " 'https://localhost:8448'.", + help="URL to use to talk to the homeserver. By default, tries to find a " + "suitable URL from the configuration file. Otherwise, defaults to " + f"'{_DEFAULT_SERVER_URL}'.", ) args = parser.parse_args() if "config" in args and args.config: config = yaml.safe_load(args.config) + + if args.shared_secret: + secret = args.shared_secret + else: + # argparse should check that we have either config or shared secret + assert config + secret = config.get("registration_shared_secret", None) if not secret: print("No 'registration_shared_secret' defined in config.") sys.exit(1) + + if args.server_url: + server_url = args.server_url + elif config: + server_url = _find_client_listener(config) + if not server_url: + server_url = _DEFAULT_SERVER_URL + print( + "Unable to find a suitable HTTP listener in the configuration file. " + f"Trying {server_url} as a last resort.", + file=sys.stderr, + ) else: - secret = args.shared_secret + server_url = _DEFAULT_SERVER_URL + print( + f"No server url or configuration file given. Defaulting to {server_url}.", + file=sys.stderr, + ) admin = None if args.admin or args.no_admin: admin = args.admin register_new_user( - args.user, args.password, args.server_url, secret, admin, args.user_type + args.user, args.password, server_url, secret, admin, args.user_type ) +def _find_client_listener(config: Dict[str, Any]) -> Optional[str]: + # try to find a listener in the config. 
Returns a host:port pair + for listener in config.get("listeners", []): + if listener.get("type") != "http" or listener.get("tls", False): + continue + + if not any( + name == "client" + for resource in listener.get("resources", []) + for name in resource.get("names", []) + ): + continue + + # TODO: consider bind_addresses + return f"http://localhost:{listener['port']}" + + # no suitable listeners? + return None + + if __name__ == "__main__": main() -- cgit 1.4.1 From d092e6f32a1a3d79337774746720a73762a35e8e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 25 Aug 2022 17:27:46 +0100 Subject: Support `registration_shared_secret` in a file (#13614) A new `registration_shared_secret_path` option. This is kinda handy for k8s deployments and things. --- changelog.d/13614.feature | 1 + docs/usage/configuration/config_documentation.md | 18 ++++++++++ synapse/_scripts/register_new_matrix_user.py | 45 ++++++++++++++++++++++-- synapse/config/registration.py | 33 +++++++++++++++-- 4 files changed, 92 insertions(+), 5 deletions(-) create mode 100644 changelog.d/13614.feature diff --git a/changelog.d/13614.feature b/changelog.d/13614.feature new file mode 100644 index 0000000000..fa177ead09 --- /dev/null +++ b/changelog.d/13614.feature @@ -0,0 +1 @@ +Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 8ae018e628..e9ab58854e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2124,10 +2124,28 @@ registration_requires_token: true If set, allows registration of standard or admin accounts by anyone who has the shared secret, even if registration is otherwise disabled. +See also [`registration_shared_secret_path`](#registration_shared_secret_path). + Example configuration: ```yaml registration_shared_secret: ``` + +--- +### `registration_shared_secret_path` + +An alternative to [`registration_shared_secret`](#registration_shared_secret): +allows the shared secret to be specified in an external file. + +The file should be a plain text file, containing only the shared secret. + +Example configuration: +```yaml +registration_shared_secret_path: /path/to/secrets/file +``` + +_Added in Synapse 1.67.0._ + --- ### `bcrypt_rounds` diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 42ae0dbea3..0c4504d5d8 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -1,6 +1,6 @@ # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector -# Copyright 2021 The Matrix.org Foundation C.I.C. +# Copyright 2021-22 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,6 +25,15 @@ from typing import Any, Callable, Dict, Optional import requests import yaml +_CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ +Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path' +are both defined in config file. +""" + +_NO_SHARED_SECRET_OPTS_ERROR = """\ +No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config.
+""" + _DEFAULT_SERVER_URL = "http://localhost:8008" @@ -222,9 +231,15 @@ def main() -> None: # argparse should check that we have either config or shared secret assert config - secret = config.get("registration_shared_secret", None) + secret = config.get("registration_shared_secret") + secret_file = config.get("registration_shared_secret_path") + if secret_file: + if secret: + print(_CONFLICTING_SHARED_SECRET_OPTS_ERROR, file=sys.stderr) + sys.exit(1) + secret = _read_file(secret_file, "registration_shared_secret_path").strip() if not secret: - print("No 'registration_shared_secret' defined in config.") + print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr) sys.exit(1) if args.server_url: @@ -254,6 +269,30 @@ def main() -> None: ) +def _read_file(file_path: Any, config_path: str) -> str: + """Check the given file exists, and read it into a string + + If it does not, exit with an error indicating the problem + + Args: + file_path: the file to be read + config_path: where in the configuration file_path came from, so that a useful + error can be emitted if it does not exist. + Returns: + content of the file. + """ + if not isinstance(file_path, str): + print(f"{config_path} setting is not a string", file=sys.stderr) + sys.exit(1) + + try: + with open(file_path) as file_stream: + return file_stream.read() + except OSError as e: + print(f"Error accessing file {file_path}: {e}", file=sys.stderr) + sys.exit(1) + + def _find_client_listener(config: Dict[str, Any]) -> Optional[str]: # try to find a listener in the config. Returns a host:port pair for listener in config.get("listeners", []): diff --git a/synapse/config/registration.py b/synapse/config/registration.py index a888d976f2..df1d83dfaa 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse -from typing import Any, Optional +from typing import Any, Dict, Optional from synapse.api.constants import RoomCreationPreset -from synapse.config._base import Config, ConfigError +from synapse.config._base import Config, ConfigError, read_file from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool @@ -27,6 +27,11 @@ password resets, configure Synapse with an SMTP server via the `email` setting, remove `account_threepid_delegates.email`. """ +CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ +You have configured both `registration_shared_secret` and +`registration_shared_secret_path`. These are mutually incompatible. 
+""" + class RegistrationConfig(Config): section = "registration" @@ -53,7 +58,16 @@ class RegistrationConfig(Config): self.enable_registration_token_3pid_bypass = config.get( "enable_registration_token_3pid_bypass", False ) + + # read the shared secret, either inline or from an external file self.registration_shared_secret = config.get("registration_shared_secret") + registration_shared_secret_path = config.get("registration_shared_secret_path") + if registration_shared_secret_path: + if self.registration_shared_secret: + raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR) + self.registration_shared_secret = read_file( + registration_shared_secret_path, ("registration_shared_secret_path",) + ).strip() self.bcrypt_rounds = config.get("bcrypt_rounds", 12) @@ -218,6 +232,21 @@ class RegistrationConfig(Config): else: return "" + def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None: + # if 'registration_shared_secret_path' is specified, and the target file + # does not exist, generate it. + registration_shared_secret_path = config.get("registration_shared_secret_path") + if registration_shared_secret_path and not self.path_exists( + registration_shared_secret_path + ): + print( + "Generating registration shared secret file " + + registration_shared_secret_path + ) + secret = random_string_with_symbols(50) + with open(registration_shared_secret_path, "w") as f: + f.write(f"{secret}\n") + @staticmethod def add_arguments(parser: argparse.ArgumentParser) -> None: reg_group = parser.add_argument_group("registration") -- cgit 1.4.1 From 978666a088aacf467d3d265ff3b7b09c0d19733d Mon Sep 17 00:00:00 2001 From: Jörg Behrmann Date: Thu, 25 Aug 2022 18:56:55 +0200 Subject: Debian packaging: explicitly allocate a group for the system user (#13593) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise the files of the synapse user are readable by the nobody user, which is unsafe. Signed-off-by: Jörg Behrmann --- debian/changelog | 6 ++++++ debian/matrix-synapse-py3.postinst | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index c3974261a9..605e660928 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.66.0~rc1+nmu1) UNRELEASED; urgency=medium + + * Allocate a group for the system user. + + -- Jörg Behrmann Tue, 23 Aug 2022 17:17:00 +0100 + matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium * New Synapse release 1.66.0rc1. diff --git a/debian/matrix-synapse-py3.postinst b/debian/matrix-synapse-py3.postinst index 029b9e0243..3c72b69bb7 100644 --- a/debian/matrix-synapse-py3.postinst +++ b/debian/matrix-synapse-py3.postinst @@ -40,12 +40,12 @@ EOF /opt/venvs/matrix-synapse/lib/manage_debconf.pl update if ! getent passwd $USER >/dev/null; then - adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER + adduser --quiet --system --group --no-create-home --home /var/lib/matrix-synapse $USER fi for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do if ! 
dpkg-statoverride --list --quiet $DIR >/dev/null; then - dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR + dpkg-statoverride --force --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR fi done -- cgit 1.4.1 From 967d7bad6c1ea97fb234c3e6d0cbc87b73bb7d0a Mon Sep 17 00:00:00 2001 From: Brad Murray Date: Fri, 26 Aug 2022 03:38:10 -0400 Subject: Move the execution of the retention purge_jobs to the main worker (#13632) Fixes #9927 Signed-off-by: Brad Murray brad@beeper.com --- changelog.d/13632.bugfix | 1 + synapse/handlers/pagination.py | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13632.bugfix diff --git a/changelog.d/13632.bugfix b/changelog.d/13632.bugfix new file mode 100644 index 0000000000..e4b7b403cd --- /dev/null +++ b/changelog.d/13632.bugfix @@ -0,0 +1 @@ +Fix the running of MSC1763 retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 74e944bce7..a0c39778ab 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -159,11 +159,9 @@ class PaginationHandler: self._retention_allowed_lifetime_max = ( hs.config.retention.retention_allowed_lifetime_max ) + self._is_master = hs.config.worker.worker_app is None - if ( - hs.config.worker.run_background_tasks - and hs.config.retention.retention_enabled - ): + if hs.config.retention.retention_enabled and self._is_master: # Run the purge jobs described in the configuration file. for job in hs.config.retention.retention_purge_jobs: logger.info("Setting up purge job with config: %s", job) -- cgit 1.4.1 From 998e211836b99cbe3d07e594c3c3d1d4ebea5aae Mon Sep 17 00:00:00 2001 From: Jörg Behrmann Date: Fri, 26 Aug 2022 10:10:54 +0200 Subject: Update debhelper (#13594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update debian packaging to debhelper version 12 Don't call dh_installinit anymore, because it has been deprecated, and use dh_installsystemd instead of dh_systemd_enable for the same reason. Signed-off-by: Jörg Behrmann * Drop preinst script It was used for reasons of interactions of dh_systemd_start and dh_installinit, which have both be deprecated Signed-off-by: Jörg Behrmann * Drop /etc/default file It was no longer being installed. * Remove debian/compat file This is managed by the control file nowadays --- debian/changelog | 2 ++ debian/compat | 1 - debian/control | 2 +- debian/matrix-synapse-py3.preinst | 31 ------------------------------- debian/matrix-synapse.default | 2 -- debian/matrix-synapse.service | 6 +++++- debian/rules | 12 +++++++----- 7 files changed, 15 insertions(+), 41 deletions(-) delete mode 100644 debian/compat delete mode 100644 debian/matrix-synapse-py3.preinst delete mode 100644 debian/matrix-synapse.default diff --git a/debian/changelog b/debian/changelog index 605e660928..3257adef77 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,7 @@ matrix-synapse-py3 (1.66.0~rc1+nmu1) UNRELEASED; urgency=medium + * Update debhelper to compatibility level 12. + * Drop the preinst script stopping synapse. * Allocate a group for the system user. 
-- Jörg Behrmann Tue, 23 Aug 2022 17:17:00 +0100 diff --git a/debian/compat b/debian/compat deleted file mode 100644 index f599e28b8a..0000000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -10 diff --git a/debian/control b/debian/control index 412a9e1d4c..86f5a66d02 100644 --- a/debian/control +++ b/debian/control @@ -4,7 +4,7 @@ Priority: extra Maintainer: Synapse Packaging team # keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv. Build-Depends: - debhelper (>= 10), + debhelper-compat (= 12), dh-virtualenv (>= 1.1), libsystemd-dev, libpq-dev, diff --git a/debian/matrix-synapse-py3.preinst b/debian/matrix-synapse-py3.preinst deleted file mode 100644 index 4b5612f050..0000000000 --- a/debian/matrix-synapse-py3.preinst +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -e - -# Attempt to undo some of the braindamage caused by -# https://github.com/matrix-org/package-synapse-debian/issues/18. -# -# Due to reasons [1], the old python2 matrix-synapse package will not stop the -# service when the package is uninstalled. Our maintainer scripts will do the -# right thing in terms of ensuring the service is enabled and unmasked, but -# then do a `systemctl start matrix-synapse`, which of course does nothing - -# leaving the old (py2) service running. -# -# There should normally be no reason for the service to be running during our -# preinst, so we assume that if it *is* running, it's due to that situation, -# and stop it. -# -# [1] dh_systemd_start doesn't do anything because it sees that there is an -# init.d script with the same name, so leaves it to dh_installinit. -# -# dh_installinit doesn't do anything because somebody gave it a --no-start -# for unknown reasons. - -if [ -x /bin/systemctl ]; then - if /bin/systemctl --quiet is-active -- matrix-synapse; then - echo >&2 "stopping existing matrix-synapse service" - /bin/systemctl stop matrix-synapse || true - fi -fi - -#DEBHELPER# - -exit 0 diff --git a/debian/matrix-synapse.default b/debian/matrix-synapse.default deleted file mode 100644 index f402d73bbf..0000000000 --- a/debian/matrix-synapse.default +++ /dev/null @@ -1,2 +0,0 @@ -# Specify environment variables used when running Synapse -# SYNAPSE_CACHE_FACTOR=0.5 (default) diff --git a/debian/matrix-synapse.service b/debian/matrix-synapse.service index bde1c6cb9f..c3f9660283 100644 --- a/debian/matrix-synapse.service +++ b/debian/matrix-synapse.service @@ -5,7 +5,6 @@ Description=Synapse Matrix homeserver Type=notify User=matrix-synapse WorkingDirectory=/var/lib/matrix-synapse -EnvironmentFile=-/etc/default/matrix-synapse ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ ExecReload=/bin/kill -HUP $MAINPID @@ -13,5 +12,10 @@ Restart=always RestartSec=3 SyslogIdentifier=matrix-synapse +# The environment file is not shipped by default anymore and the below directive +# is for backwards compatibility only. Please use your homeserver.yaml if +# possible. 
+EnvironmentFile=-/etc/default/matrix-synapse + [Install] WantedBy=multi-user.target diff --git a/debian/rules b/debian/rules index 5baf2475f0..3b79d56074 100755 --- a/debian/rules +++ b/debian/rules @@ -6,15 +6,17 @@ # assume we only have one package PACKAGE_NAME:=`dh_listpackages` -override_dh_systemd_enable: - dh_systemd_enable --name=matrix-synapse - -override_dh_installinit: - dh_installinit --name=matrix-synapse +override_dh_installsystemd: + dh_installsystemd --name=matrix-synapse # we don't really want to strip the symbols from our object files. override_dh_strip: +# many libraries pulled from PyPI have allocatable sections after +# non-allocatable ones on which dwz errors out. For those without the issue the +# gains are only marginal +override_dh_dwz: + # dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files # (executables and shared libs) in the package, and looks for the shared # libraries that they depend on. It then adds a dependency on the package that -- cgit 1.4.1 From 5e5c8150d798f6929ddedbb39f9f11486558cdbc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 26 Aug 2022 12:26:06 +0100 Subject: Generate missing configuration files at startup (#13615) If things like the signing key file are missing, let's just try to generate them on startup. Again, this is useful for k8s-like deployments where we just want to generate keys on the first run. --- changelog.d/13615.feature | 1 + docs/usage/configuration/config_documentation.md | 8 +++- synapse/config/_base.py | 59 +++++++++++++++++++++++----- 3 files changed, 56 insertions(+), 12 deletions(-) create mode 100644 changelog.d/13615.feature diff --git a/changelog.d/13615.feature b/changelog.d/13615.feature new file mode 100644 index 0000000000..c2c568f1eb --- /dev/null +++ b/changelog.d/13615.feature @@ -0,0 +1 @@ +Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index e9ab58854e..4c59e3dcf2 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2139,6 +2139,9 @@ allows the shared secret to be specified in an external file. The file should be a plain text file, containing only the shared secret. +If this file does not exist, Synapse will create a new shared +secret on startup and store it in this file. + Example configuration: ```yaml registration_shared_secret_path: /path/to/secrets/file @@ -2555,7 +2558,10 @@ Config options relating to signing keys --- ### `signing_key_path` -Path to the signing key to sign messages with. +Path to the signing key to sign events and federation requests with. + +*New in Synapse 1.67*: If this file does not exist, Synapse will create a new signing +key on startup and store it in this file.
Example configuration: ```yaml diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 7c9cf403ef..1f6362aedd 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -20,6 +20,7 @@ import logging import os import re from collections import OrderedDict +from enum import Enum, auto from hashlib import sha256 from textwrap import dedent from typing import ( @@ -603,18 +604,44 @@ class RootConfig: " may specify directories containing *.yaml files.", ) - generate_group = parser.add_argument_group("Config generation") - generate_group.add_argument( + # we nest the mutually-exclusive group inside another group so that the help + # text shows them in their own group. + generate_mode_group = parser.add_argument_group( + "Config generation mode", + ) + generate_mode_exclusive = generate_mode_group.add_mutually_exclusive_group() + generate_mode_exclusive.add_argument( + # hidden option to make the type and default work + "--generate-mode", + help=argparse.SUPPRESS, + type=_ConfigGenerateMode, + default=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN, + ) + generate_mode_exclusive.add_argument( "--generate-config", - action="store_true", help="Generate a config file, then exit.", + action="store_const", + const=_ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT, + dest="generate_mode", ) - generate_group.add_argument( + generate_mode_exclusive.add_argument( "--generate-missing-configs", "--generate-keys", - action="store_true", help="Generate any missing additional config files, then exit.", + action="store_const", + const=_ConfigGenerateMode.GENERATE_MISSING_AND_EXIT, + dest="generate_mode", ) + generate_mode_exclusive.add_argument( + "--generate-missing-and-run", + help="Generate any missing additional config files, then run. This is the " + "default behaviour.", + action="store_const", + const=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN, + dest="generate_mode", + ) + + generate_group = parser.add_argument_group("Details for --generate-config") generate_group.add_argument( "-H", "--server-name", help="The server name to generate a config file for." 
) @@ -670,11 +697,12 @@ class RootConfig: config_dir_path = os.path.abspath(config_dir_path) data_dir_path = os.getcwd() - generate_missing_configs = config_args.generate_missing_configs - obj = cls(config_files) - if config_args.generate_config: + if ( + config_args.generate_mode + == _ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT + ): if config_args.report_stats is None: parser.error( "Please specify either --report-stats=yes or --report-stats=no\n\n" @@ -732,11 +760,14 @@ class RootConfig: ) % (config_path,) ) - generate_missing_configs = True config_dict = read_config_files(config_files) - if generate_missing_configs: - obj.generate_missing_files(config_dict, config_dir_path) + obj.generate_missing_files(config_dict, config_dir_path) + + if config_args.generate_mode in ( + _ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT, + _ConfigGenerateMode.GENERATE_MISSING_AND_EXIT, + ): return None obj.parse_config_dict( @@ -965,6 +996,12 @@ def read_file(file_path: Any, config_path: Iterable[str]) -> str: raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e +class _ConfigGenerateMode(Enum): + GENERATE_MISSING_AND_RUN = auto() + GENERATE_MISSING_AND_EXIT = auto() + GENERATE_EVERYTHING_AND_EXIT = auto() + + __all__ = [ "Config", "RootConfig", -- cgit 1.4.1 From c4e29b6908ac8ae57b5e9a3e7662ad638b61e94a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 26 Aug 2022 14:29:31 +0100 Subject: Improve documentation around user registration (#13640) Update a bunch of the documentation for user registration, add some cross links, etc. --- changelog.d/13640.doc | 1 + docs/admin_api/register_api.md | 6 +- docs/setup/installation.md | 23 ++- docs/usage/configuration/config_documentation.md | 207 +++++++++++++---------- 4 files changed, 134 insertions(+), 103 deletions(-) create mode 100644 changelog.d/13640.doc diff --git a/changelog.d/13640.doc b/changelog.d/13640.doc new file mode 100644 index 0000000000..fa049371cf --- /dev/null +++ b/changelog.d/13640.doc @@ -0,0 +1 @@ +Improve documentation around user registration. diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md index d7b7cf6a76..f6be31b443 100644 --- a/docs/admin_api/register_api.md +++ b/docs/admin_api/register_api.md @@ -5,9 +5,9 @@ non-interactive way. This is generally used for bootstrapping a Synapse instance with administrator accounts. To authenticate yourself to the server, you will need both the shared secret -(`registration_shared_secret` in the homeserver configuration), and a -one-time nonce. If the registration shared secret is not configured, this API -is not enabled. +([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret) +in the homeserver configuration), and a one-time nonce. If the registration +shared secret is not configured, this API is not enabled. To fetch the nonce, you need to request one from the API: diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 260e50577b..bb78b3267a 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -506,9 +506,13 @@ email will be disabled. ### Registering a user -The easiest way to create a new user is to do so from a client like [Element](https://element.io/). +One way to create a new user is to do so from a client like +[Element](https://element.io/). 
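Stepping back to the `_ConfigGenerateMode` change in #13615 above: the pattern is that several mutually exclusive command-line flags all `store_const` into a single hidden destination, so the rest of the startup code can switch on one Enum value instead of several booleans. A minimal, hedged sketch of that argparse idiom (the option names here are invented for illustration; they are not Synapse's):

```python
# Sketch of the flags-into-one-Enum argparse idiom (illustrative only).
import argparse
from enum import Enum, auto


class Mode(Enum):
    GENERATE_AND_RUN = auto()
    GENERATE_AND_EXIT = auto()


parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
# Hidden option whose only job is to give the destination a type and default.
group.add_argument(
    "--mode", help=argparse.SUPPRESS, type=Mode, default=Mode.GENERATE_AND_RUN
)
group.add_argument(
    "--generate-and-run",
    action="store_const",
    const=Mode.GENERATE_AND_RUN,
    dest="mode",
    help="Generate any missing files, then run (the default).",
)
group.add_argument(
    "--generate-and-exit",
    action="store_const",
    const=Mode.GENERATE_AND_EXIT,
    dest="mode",
    help="Generate any missing files, then exit.",
)

args = parser.parse_args(["--generate-and-exit"])
assert args.mode is Mode.GENERATE_AND_EXIT  # every flag funnels into one field
```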
This requires registration to be enabled via +the +[`enable_registration`](../usage/configuration/config_documentation.md#enable_registration) +setting. -Alternatively, you can do so from the command line. This can be done as follows: +Alternatively, you can create new users from the command line. This can be done as follows: 1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was installed via a prebuilt package, `register_new_matrix_user` should already be @@ -520,7 +524,7 @@ Alternatively, you can do so from the command line. This can be done as follows: ``` 2. Run the following command: ```sh - register_new_matrix_user -c homeserver.yaml http://localhost:8008 + register_new_matrix_user -c homeserver.yaml ``` This will prompt you to add details for the new user, and will then connect to @@ -533,12 +537,13 @@ Make admin [no]: Success! ``` -This process uses a setting `registration_shared_secret` in -`homeserver.yaml`, which is shared between Synapse itself and the -`register_new_matrix_user` script. It doesn't matter what it is (a random -value is generated by `--generate-config`), but it should be kept secret, as -anyone with knowledge of it can register users, including admin accounts, -on your server even if `enable_registration` is `false`. +This process uses a setting +[`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret), +which is shared between Synapse itself and the `register_new_matrix_user` +script. It doesn't matter what it is (a random value is generated by +`--generate-config`), but it should be kept secret, as anyone with knowledge of +it can register users, including admin accounts, on your server even if +`enable_registration` is `false`. ### Setting up a TURN server diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 4c59e3dcf2..c0cfe5dcea 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1873,8 +1873,8 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. --- ### `recaptcha_public_key` -This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is -enabled. +This homeserver's ReCAPTCHA public key. Must be specified if +[`enable_registration_captcha`](#enable_registration_captcha) is enabled. Example configuration: ```yaml @@ -1883,7 +1883,8 @@ recaptcha_public_key: "YOUR_PUBLIC_KEY" --- ### `recaptcha_private_key` -This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA private key. Must be specified if +[`enable_registration_captcha`](#enable_registration_captcha) is enabled. Example configuration: @@ -1893,9 +1894,11 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY" --- ### `enable_registration_captcha` -Set to true to enable ReCaptcha checks when registering, preventing signup -unless a captcha is answered. Requires a valid ReCaptcha public/private key. -Defaults to false. +Set to `true` to require users to complete a CAPTCHA test when registering an account. +Requires a valid ReCaptcha public/private key. +Defaults to `false`. + +Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. Example configuration: ```yaml @@ -1971,98 +1974,43 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat --- ### `enable_registration` -Enable registration for new users. 
Defaults to false. It is highly recommended that if you enable registration, -you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration -without any verification, you must also set `enable_registration_without_verification` to true. +Enable registration for new users. Defaults to `false`. -Example configuration: -```yaml -enable_registration: true -``` ---- -### `enable_registration_without_verification` -Enable registration without email or captcha verification. Note: this option is *not* recommended, -as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect -unless `enable_registration` is also enabled. +It is highly recommended that if you enable registration, you set one or more +of the following options, to avoid abuse of your server by "bots": -Example configuration: -```yaml -enable_registration_without_verification: true -``` ---- -### `session_lifetime` + * [`enable_registration_captcha`](#enable_registration_captcha) + * [`registrations_require_3pid`](#registrations_require_3pid) + * [`registration_requires_token`](#registration_requires_token) -Time that a user's session remains valid for, after they log in. +(In order to enable registration without any verification, you must also set +[`enable_registration_without_verification`](#enable_registration_without_verification).) -Note that this is not currently compatible with guest logins. - -Note also that this is calculated at login time: changes are not applied retrospectively to users who have already -logged in. - -By default, this is infinite. +Note that even if this setting is disabled, new accounts can still be created +via the admin API if +[`registration_shared_secret`](#registration_shared_secret) is set. Example configuration: ```yaml -session_lifetime: 24h -``` ----- -### `refresh_access_token_lifetime` - -Time that an access token remains valid for, if the session is using refresh tokens. - -For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md). - -Note that this only applies to clients which advertise support for refresh tokens. - -Note also that this is calculated at login time and refresh time: changes are not applied to -existing sessions until they are refreshed. - -By default, this is 5 minutes. - -Example configuration: -```yaml -refreshable_access_token_lifetime: 10m -``` ---- -### `refresh_token_lifetime: 24h` - -Time that a refresh token remains valid for (provided that it is not -exchanged for another one first). -This option can be used to automatically log-out inactive sessions. -Please see the manual for more information. - -Note also that this is calculated at login time and refresh time: -changes are not applied to existing sessions until they are refreshed. - -By default, this is infinite. - -Example configuration: -```yaml -refresh_token_lifetime: 24h +enable_registration: true ``` --- -### `nonrefreshable_access_token_lifetime` - -Time that an access token remains valid for, if the session is NOT -using refresh tokens. - -Please note that not all clients support refresh tokens, so setting -this to a short value may be inconvenient for some users who will -then be logged out frequently. - -Note also that this is calculated at login time: changes are not applied -retrospectively to existing sessions for users that have already logged in. +### `enable_registration_without_verification` -By default, this is infinite.
+Enable registration without email or captcha verification. Note: this option is *not* recommended, +as registration without verification is a known vector for spam and abuse. Defaults to `false`. Has no effect +unless [`enable_registration`](#enable_registration) is also enabled. Example configuration: ```yaml -nonrefreshable_access_token_lifetime: 24h +enable_registration_without_verification: true ``` --- ### `registrations_require_3pid` -If this is set, the user must provide all of the specified types of 3PID when registering. +If this is set, users must provide all of the specified types of 3PID when registering an account. + +Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. Example configuration: ```yaml @@ -2110,9 +2058,11 @@ enable_3pid_lookup: false Require users to submit a token during registration. Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). -Note that `enable_registration` must be set to true. Disabling this option will not delete any tokens previously generated. -Defaults to false. Set to true to enable. +Defaults to `false`. Set to `true` to enable. + + +Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. Example configuration: ```yaml @@ -2121,8 +2071,13 @@ registration_requires_token: true --- ### `registration_shared_secret` -If set, allows registration of standard or admin accounts by anyone who -has the shared secret, even if registration is otherwise disabled. +If set, allows registration of standard or admin accounts by anyone who has the +shared secret, even if [`enable_registration`](#enable_registration) is not +set. + +This is primarily intended for use with the `register_new_matrix_user` script +(see [Registering a user](../../setup/installation.md#registering-a-user)); +however, the interface is [documented](../admin_api/register_api.html). See also [`registration_shared_secret_path`](#registration_shared_secret_path). @@ -2379,6 +2334,79 @@ Example configuration: ```yaml inhibit_user_in_use_error: true ``` +--- +## User session management +--- +### `session_lifetime` + +Time that a user's session remains valid for, after they log in. + +Note that this is not currently compatible with guest logins. + +Note also that this is calculated at login time: changes are not applied retrospectively to users who have already +logged in. + +By default, this is infinite. + +Example configuration: +```yaml +session_lifetime: 24h +``` +---- +### `refresh_access_token_lifetime` + +Time that an access token remains valid for, if the session is using refresh tokens. + +For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md). + +Note that this only applies to clients which advertise support for refresh tokens. + +Note also that this is calculated at login time and refresh time: changes are not applied to -existing sessions until they are refreshed. + +By default, this is 5 minutes. + +Example configuration: +```yaml +refreshable_access_token_lifetime: 10m +``` +--- +### `refresh_token_lifetime` + +Time that a refresh token remains valid for (provided that it is not +exchanged for another one first). +This option can be used to automatically log out inactive sessions. +Please see the manual for more information. + +Note also that this is calculated at login time and refresh time: +changes are not applied to existing sessions until they are refreshed.
+ +By default, this is infinite. + +Example configuration: +```yaml +refresh_token_lifetime: 24h +``` +--- +### `nonrefreshable_access_token_lifetime` + +Time that an access token remains valid for, if the session is NOT +using refresh tokens. + +Please note that not all clients support refresh tokens, so setting +this to a short value may be inconvenient for some users who will +then be logged out frequently. + +Note also that this is calculated at login time: changes are not applied +retrospectively to existing sessions for users that have already logged in. + +By default, this is infinite. + +Example configuration: +```yaml +nonrefreshable_access_token_lifetime: 24h +``` + --- ## Metrics ### Config options related to metrics. @@ -2666,13 +2694,10 @@ key_server_signing_keys_path: "key_server_signing_keys.key" The following settings can be used to make Synapse use a single sign-on provider for authentication, instead of its internal password database. -You will probably also want to set the following options to false to +You will probably also want to set the following options to `false` to disable the regular login/registration flows: - * `enable_registration` - * `password_config.enabled` - -You will also want to investigate the settings under the "sso" configuration -section below. + * [`enable_registration`](#enable_registration) + * [`password_config.enabled`](#password_config) --- ### `saml2_config` -- cgit 1.4.1 From 4f6de33f4147f745eb808b717666eb4d35a12716 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Sun, 28 Aug 2022 21:05:30 +0100 Subject: Print complement failure results last (#13639) Since github always scrolls to the bottom of any test output, let's put the failed tests last and hide any successful packages. --- .ci/complement_package.gotpl | 14 ++++++-------- .ci/scripts/gotestfmt | 21 +++++++++++++++++++++ .github/workflows/latest_deps.yml | 2 +- .github/workflows/tests.yml | 2 +- .github/workflows/twisted_trunk.yml | 2 +- changelog.d/13639.misc | 1 + 6 files changed, 31 insertions(+), 11 deletions(-) create mode 100755 .ci/scripts/gotestfmt create mode 100644 changelog.d/13639.misc diff --git a/.ci/complement_package.gotpl b/.ci/complement_package.gotpl index e1625fd31f..2dd5e5e0e6 100644 --- a/.ci/complement_package.gotpl +++ b/.ci/complement_package.gotpl @@ -27,10 +27,10 @@ which is under the Unlicense licence. {{- . -}}{{- "\n" -}} {{- end -}} {{- with .TestCases -}} - {{- /* Failing tests are first */ -}} + {{- /* Passing tests are first */ -}} {{- range . -}} - {{- if and (ne .Result "PASS") (ne .Result "SKIP") -}} - ::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}} + {{- if eq .Result "PASS" -}} + ::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}} {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} {{- with .Coverage -}} , coverage: {{ . }}% @@ -47,7 +47,6 @@ which is under the Unlicense licence. {{- end -}} {{- end -}} - {{- /* Then skipped tests are second */ -}} {{- range . -}} {{- if eq .Result "SKIP" -}} @@ -68,11 +67,10 @@ which is under the Unlicense licence. {{- end -}} {{- end -}} - - {{- /* Then passing tests are last */ -}} + {{- /* and failing tests are last */ -}} {{- range . 
-}} - {{- if eq .Result "PASS" -}} - ::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}} + {{- if and (ne .Result "PASS") (ne .Result "SKIP") -}} + ::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}} {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} {{- with .Coverage -}} , coverage: {{ . }}% diff --git a/.ci/scripts/gotestfmt b/.ci/scripts/gotestfmt new file mode 100755 index 0000000000..83e0ec6361 --- /dev/null +++ b/.ci/scripts/gotestfmt @@ -0,0 +1,21 @@ +#!/bin/bash +# +# wraps `gotestfmt`, hiding output from successful packages unless +# all tests passed. + +set -o pipefail +set -e + +# tee the test results to a log, whilst also piping them into gotestfmt, +# telling it to hide successful results, so that we can clearly see +# unsuccessful results. +tee complement.log | gotestfmt -hide successful-packages + +# gotestfmt will exit non-zero if there were any failures, so if we got to this +# point, we must have had a successful result. +echo "All tests successful; showing all test results" + +# Pipe the test results back through gotestfmt, showing all results. +# The log file consists of JSON lines giving the test results, interspersed +# with regular stdout lines (including reports of downloaded packages). +grep '^{"Time":' complement.log | gotestfmt diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index f263cf612d..7dac617c4b 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -163,7 +163,7 @@ jobs: - run: | set -o pipefail - TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt shell: bash name: Run Complement Tests diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 144cb9ffaa..3e6d718a7c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -354,7 +354,7 @@ jobs: - run: | set -o pipefail - POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt shell: bash name: Run Complement Tests diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index dd8e6fbb1c..0906101cc1 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -137,7 +137,7 @@ jobs: - run: | set -o pipefail - TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh 
-json 2>&1 | synapse/.ci/scripts/gotestfmt shell: bash name: Run Complement Tests diff --git a/changelog.d/13639.misc b/changelog.d/13639.misc new file mode 100644 index 0000000000..de4e4d1206 --- /dev/null +++ b/changelog.d/13639.misc @@ -0,0 +1 @@ +Improve readability of Complement CI logs by printing failure results last. -- cgit 1.4.1 From 51d732db3b4ab13eb58e937a546abce7968112ef Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 30 Aug 2022 01:38:14 -0500 Subject: Optimize how we calculate `likely_domains` during backfill (#13575) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Optimize how we calculate `likely_domains` during backfill because I've seen this take 17s in production just to `get_current_state` which is used to `get_domains_from_state` (see case [*2. Loading tons of events* in the `/messages` investigation issue](https://github.com/matrix-org/synapse/issues/13356)). There are 3 ways we currently calculate hosts that are in the room: 1. `get_current_state` -> `get_domains_from_state` - Used in `backfill` to calculate `likely_domains` and `/timestamp_to_event` because it was cargo-culted from `backfill` - This one is being eliminated in favor of `get_current_hosts_in_room` in this PR 🕳 1. `get_current_hosts_in_room` - Used for other federation things like sending read receipts and typing indicators 1. `get_hosts_in_room_at_events` - Used when pushing out events over federation to other servers in the `_process_event_queue_loop` Fix https://github.com/matrix-org/synapse/issues/13626 Part of https://github.com/matrix-org/synapse/issues/13356 Mentioned in [internal doc](https://docs.google.com/document/d/1lvUoVfYUiy6UaHB6Rb4HicjaJAU40-APue9Q4vzuW3c/edit#bookmark=id.2tvwz3yhcafh) ### Query performance #### Before The query from `get_current_state` sucks just because we have to get all 80k events. And we see almost the exact same performance locally trying to get all of these events (16s vs 17s): ``` synapse=# SELECT type, state_key, event_id FROM current_state_events WHERE room_id = '!OGEhHVWSdvArJzumhm:matrix.org'; Time: 16035.612 ms (00:16.036) synapse=# SELECT type, state_key, event_id FROM current_state_events WHERE room_id = '!OGEhHVWSdvArJzumhm:matrix.org'; Time: 4243.237 ms (00:04.243) ``` But what about `get_current_hosts_in_room`: When there are 8M rows in the `current_state_events` table, the previous query in `get_current_hosts_in_room` took 13s from complete freshness (when the events were first added). But it takes 930ms after a Postgres restart or 390ms if running back to back to back. ```sh $ psql synapse synapse=# \timing on synapse=# SELECT COUNT(DISTINCT substring(state_key FROM '@[^:]*:(.*)$')) FROM current_state_events WHERE type = 'm.room.member' AND membership = 'join' AND room_id = '!OGEhHVWSdvArJzumhm:matrix.org'; count ------- 4130 (1 row) Time: 13181.598 ms (00:13.182) synapse=# SELECT COUNT(*) from current_state_events where room_id = '!OGEhHVWSdvArJzumhm:matrix.org'; count ------- 80814 synapse=# SELECT COUNT(*) from current_state_events; count --------- 8162847 synapse=# SELECT pg_size_pretty( pg_total_relation_size('current_state_events') ); pg_size_pretty ---------------- 4702 MB ``` #### After I'm not sure how long it takes from complete freshness as I only really get that opportunity once (maybe restarting the computer, but that's cumbersome) and it's not really relevant to normal operating times.
Maybe you get closer to the fresh times the more access variability there is so that Postgres caches aren't as exact. Update: The longest I've seen this run for is 6.4s and 4.5s after a computer restart. After a Postgres restart, it takes 330ms and running back to back takes 260ms. ```sh $ psql synapse synapse=# \timing on Timing is on. synapse=# SELECT substring(c.state_key FROM '@[^:]*:(.*)$') as host FROM current_state_events c /* Get the depth of the event from the events table */ INNER JOIN events AS e USING (event_id) WHERE c.type = 'm.room.member' AND c.membership = 'join' AND c.room_id = '!OGEhHVWSdvArJzumhm:matrix.org' GROUP BY host ORDER BY min(e.depth) ASC; Time: 333.800 ms ``` #### Going further To improve things further we could add a `limit` parameter to `get_current_hosts_in_room`. Realistically, we don't need 4k domains to choose from because there is no way we're going to query that many before we a) probably get an answer or b) we give up. Another thing we can do is optimize the query to use a index skip scan: - https://wiki.postgresql.org/wiki/Loose_indexscan - Index Skip Scan, https://commitfest.postgresql.org/37/1741/ - https://www.timescale.com/blog/how-we-made-distinct-queries-up-to-8000x-faster-on-postgresql/ --- changelog.d/13575.misc | 1 + synapse/handlers/federation.py | 53 ++++------------- synapse/handlers/room.py | 14 ++--- synapse/storage/controllers/state.py | 3 +- synapse/storage/databases/main/roommember.py | 88 ++++++++++++++++++++++------ 5 files changed, 89 insertions(+), 70 deletions(-) create mode 100644 changelog.d/13575.misc diff --git a/changelog.d/13575.misc b/changelog.d/13575.misc new file mode 100644 index 0000000000..3841472617 --- /dev/null +++ b/changelog.d/13575.misc @@ -0,0 +1 @@ +Optimize how Synapse calculates domains to fetch from during backfill. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e151962055..dd4b9f66d1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -70,7 +70,7 @@ from synapse.replication.http.federation import ( from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter -from synapse.types import JsonDict, StateMap, get_domain_from_id +from synapse.types import JsonDict, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination from synapse.visibility import filter_events_for_server @@ -104,37 +104,6 @@ backfill_processing_before_timer = Histogram( ) -def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]: - """Get joined domains from state - - Args: - state: State map from type/state key to event. - - Returns: - Returns a list of servers with the lowest depth of their joins. - Sorted by lowest depth first. 
- """ - joined_users = [ - (state_key, int(event.depth)) - for (e_type, state_key), event in state.items() - if e_type == EventTypes.Member and event.membership == Membership.JOIN - ] - - joined_domains: Dict[str, int] = {} - for u, d in joined_users: - try: - dom = get_domain_from_id(u) - old_d = joined_domains.get(dom) - if old_d: - joined_domains[dom] = min(d, old_d) - else: - joined_domains[dom] = d - except Exception: - pass - - return sorted(joined_domains.items(), key=lambda d: d[1]) - - class _BackfillPointType(Enum): # a regular backwards extremity (ie, an event which we don't yet have, but which # is referred to by other events in the DAG) @@ -432,21 +401,19 @@ class FederationHandler: ) # Now we need to decide which hosts to hit first. - - # First we try hosts that are already in the room + # First we try hosts that are already in the room. # TODO: HEURISTIC ALERT. + likely_domains = ( + await self._storage_controllers.state.get_current_hosts_in_room(room_id) + ) - curr_state = await self._storage_controllers.state.get_current_state(room_id) - - curr_domains = get_domains_from_state(curr_state) - - likely_domains = [ - domain for domain, depth in curr_domains if domain != self.server_name - ] - - async def try_backfill(domains: List[str]) -> bool: + async def try_backfill(domains: Collection[str]) -> bool: # TODO: Should we try multiple of these at a time? for dom in domains: + # We don't want to ask our own server for information we don't have + if dom == self.server_name: + continue + try: await self._federation_event_handler.backfill( dom, room_id, limit=100, extremities=extremities_to_request diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2fc8264858..f64a8690a5 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -60,7 +60,6 @@ from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.federation.federation_client import InvalidResponseError -from synapse.handlers.federation import get_domains_from_state from synapse.handlers.relations import BundledAggregations from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin @@ -1462,17 +1461,16 @@ class TimestampLookupHandler: timestamp, ) - # Find other homeservers from the given state in the room - curr_state = await self._storage_controllers.state.get_current_state( - room_id + likely_domains = ( + await self._storage_controllers.state.get_current_hosts_in_room(room_id) ) - curr_domains = get_domains_from_state(curr_state) - likely_domains = [ - domain for domain, depth in curr_domains if domain != self.server_name - ] # Loop through each homeserver candidate until we get a succesful response for domain in likely_domains: + # We don't want to ask our own server for information we don't have + if domain == self.server_name: + continue + try: remote_response = await self.federation_client.timestamp_to_event( domain, room_id, timestamp, direction diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index f9ffd0e29e..ba5380ce3e 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -23,7 +23,6 @@ from typing import ( List, Mapping, Optional, - Set, Tuple, ) @@ -520,7 +519,7 @@ class StateStorageController: ) return state_map.get(key) - async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: + async def get_current_hosts_in_room(self, 
room_id: str) -> List[str]: """Get current hosts in room based on current state.""" await self._partial_state_room_tracker.await_full_state(room_id) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 9e5034b401..06500457bd 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -187,27 +187,48 @@ class RoomMemberWorkerStore(EventsWorkerStore): @cached(max_entries=100000, iterable=True) async def get_users_in_room(self, room_id: str) -> List[str]: + """ + Returns a list of users in the room sorted by longest in the room first + (aka. with the lowest depth). This is done to match the sort in + `get_current_hosts_in_room()` and so we can re-use the cache but it's + not horrible to have here either. + """ + return await self.db_pool.runInteraction( "get_users_in_room", self.get_users_in_room_txn, room_id ) def get_users_in_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[str]: + """ + Returns a list of users in the room sorted by longest in the room first + (aka. with the lowest depth). This is done to match the sort in + `get_current_hosts_in_room()` and so we can re-use the cache but it's + not horrible to have here either. + """ # If we can assume current_state_events.membership is up to date # then we can avoid a join, which is a Very Good Thing given how # frequently this function gets called. if self._current_state_events_membership_up_to_date: sql = """ - SELECT state_key FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? AND membership = ? + SELECT c.state_key FROM current_state_events as c + /* Get the depth of the event from the events table */ + INNER JOIN events AS e USING (event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? AND membership = ? + /* Sorted by lowest depth first */ + ORDER BY e.depth ASC; """ else: sql = """ - SELECT state_key FROM room_memberships as m + SELECT c.state_key FROM room_memberships as m + /* Get the depth of the event from the events table */ + INNER JOIN events AS e USING (event_id) INNER JOIN current_state_events as c ON m.event_id = c.event_id AND m.room_id = c.room_id AND m.user_id = c.state_key WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? + /* Sorted by lowest depth first */ + ORDER BY e.depth ASC; """ txn.execute(sql, (room_id, Membership.JOIN)) @@ -1037,37 +1058,70 @@ class RoomMemberWorkerStore(EventsWorkerStore): return True @cached(iterable=True, max_entries=10000) - async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: - """Get current hosts in room based on current state.""" + async def get_current_hosts_in_room(self, room_id: str) -> List[str]: + """ + Get current hosts in room based on current state. + + The heuristic of sorting by servers who have been in the room the + longest is good because they're most likely to have anything we ask + about. + + Returns: + Returns a list of servers sorted by longest in the room first. (aka. + sorted by join with the lowest depth first). 
+ """ # First we check if we already have `get_users_in_room` in the cache, as # we can just calculate result from that users = self.get_users_in_room.cache.get_immediate( (room_id,), None, update_metrics=False ) - if users is not None: - return {get_domain_from_id(u) for u in users} - - if isinstance(self.database_engine, Sqlite3Engine): + if users is None and isinstance(self.database_engine, Sqlite3Engine): # If we're using SQLite then let's just always use # `get_users_in_room` rather than funky SQL. users = await self.get_users_in_room(room_id) - return {get_domain_from_id(u) for u in users} + + if users is not None: + # Because `users` is sorted from lowest -> highest depth, the list + # of domains will also be sorted that way. + domains: List[str] = [] + # We use a `Set` just for fast lookups + domain_set: Set[str] = set() + for u in users: + domain = get_domain_from_id(u) + if domain not in domain_set: + domain_set.add(domain) + domains.append(domain) + return domains # For PostgreSQL we can use a regex to pull out the domains from the # joined users in `current_state_events` via regex. - def get_current_hosts_in_room_txn(txn: LoggingTransaction) -> Set[str]: + def get_current_hosts_in_room_txn(txn: LoggingTransaction) -> List[str]: + # Returns a list of servers currently joined in the room sorted by + # longest in the room first (aka. with the lowest depth). The + # heuristic of sorting by servers who have been in the room the + # longest is good because they're most likely to have anything we + # ask about. sql = """ - SELECT DISTINCT substring(state_key FROM '@[^:]*:(.*)$') - FROM current_state_events + SELECT + /* Match the domain part of the MXID */ + substring(c.state_key FROM '@[^:]*:(.*)$') as server_domain + FROM current_state_events c + /* Get the depth of the event from the events table */ + INNER JOIN events AS e USING (event_id) WHERE - type = 'm.room.member' - AND membership = 'join' - AND room_id = ? + /* Find any join state events in the room */ + c.type = 'm.room.member' + AND c.membership = 'join' + AND c.room_id = ? + /* Group all state events from the same domain into their own buckets (groups) */ + GROUP BY server_domain + /* Sorted by lowest depth first */ + ORDER BY min(e.depth) ASC; """ txn.execute(sql, (room_id,)) - return {d for d, in txn} + return [d for d, in txn] return await self.db_pool.runInteraction( "get_current_hosts_in_room", get_current_hosts_in_room_txn -- cgit 1.4.1 From 682dfcfc0db05d9c99b7615d950997535df4d533 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 30 Aug 2022 11:58:38 +0200 Subject: Fix that user cannot `/forget` rooms after the last member has left (#13546) --- changelog.d/13546.bugfix | 1 + synapse/handlers/room_member.py | 7 ++- tests/handlers/test_room_member.py | 93 +++++++++++++++++++++++++++++++++++++- tests/storage/test_roommember.py | 4 +- 4 files changed, 99 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13546.bugfix diff --git a/changelog.d/13546.bugfix b/changelog.d/13546.bugfix new file mode 100644 index 0000000000..83bc3a61d2 --- /dev/null +++ b/changelog.d/13546.bugfix @@ -0,0 +1 @@ +Fix bug that user cannot `/forget` rooms after the last member has left the room. 
\ No newline at end of file
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 709682622f..e726997d83 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1925,8 +1925,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
         ]:
             raise SynapseError(400, "User %s in room %s" % (user_id, room_id))

-        if membership:
-            await self.store.forget(user_id, room_id)
+        # In the normal case this call is only required if `membership` is not
+        # `None`. But: after the last member has left the room, the background
+        # update `_background_remove_left_rooms` deletes the rows relating to
+        # this room from `current_state_events`, so `get_current_state_events`
+        # returns `None`.
+        await self.store.forget(user_id, room_id)


 def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 1d13ed1e88..6bbfd5dc84 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -6,7 +6,7 @@ import synapse.rest.admin
 import synapse.rest.client.login
 import synapse.rest.client.room
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import LimitExceededError
+from synapse.api.errors import LimitExceededError, SynapseError
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events import FrozenEventV3
 from synapse.federation.federation_client import SendJoinResult
@@ -17,7 +17,11 @@ from synapse.util import Clock
 from tests.replication._base import BaseMultiWorkerStreamTestCase
 from tests.server import make_request
 from tests.test_utils import make_awaitable
-from tests.unittest import FederatingHomeserverTestCase, override_config
+from tests.unittest import (
+    FederatingHomeserverTestCase,
+    HomeserverTestCase,
+    override_config,
+)


 class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
@@ -287,3 +291,88 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCa
             ),
             LimitExceededError,
         )
+
+
+class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        synapse.rest.client.login.register_servlets,
+        synapse.rest.client.room.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.handler = hs.get_room_member_handler()
+        self.store = hs.get_datastores().main
+
+        # Create two users.
+        self.alice = self.register_user("alice", "pass")
+        self.alice_ID = UserID.from_string(self.alice)
+        self.alice_token = self.login("alice", "pass")
+        self.bob = self.register_user("bob", "pass")
+        self.bob_ID = UserID.from_string(self.bob)
+        self.bob_token = self.login("bob", "pass")
+
+        # Create a room on this homeserver.
+        self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token)
+
+    def test_leave_and_forget(self) -> None:
+        """Tests that forgetting a room succeeds. The test is performed with two
+        users, as forgetting by the last user after all users have left the room
+        is a special edge case."""
+        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+        # alice is not the last room member that leaves and forgets the room
+        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+        self.assertTrue(
+            self.get_success(self.store.did_forget(self.alice, self.room_id))
+        )
+
+        # the server has not forgotten the room
+        self.assertFalse(
+            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+        )
+
+    def test_leave_and_forget_last_user(self) -> None:
+        """Tests that forgetting a room succeeds when the last user has left the room."""
+
+        # alice is the last room member that leaves and forgets the room
+        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+        self.assertTrue(
+            self.get_success(self.store.did_forget(self.alice, self.room_id))
+        )
+
+        # the server has forgotten the room
+        self.assertTrue(
+            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+        )
+
+    def test_forget_when_not_left(self) -> None:
+        """Tests that a user cannot forget a room that they have not left."""
+        self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError)
+
+    def test_rejoin_forgotten_by_user(self) -> None:
+        """Tests that a user who has forgotten a room can rejoin it. The room is
+        not forgotten by the local server, as one local user is still a member
+        of the room."""
+        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+        self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+        self.assertTrue(
+            self.get_success(self.store.did_forget(self.alice, self.room_id))
+        )
+
+        # the server has not forgotten the room
+        self.assertFalse(
+            self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+        )
+
+        self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
+        # TODO: A join to a room does not invalidate the forgotten cache
+        # see https://github.com/matrix-org/synapse/issues/13262
+        self.store.did_forget.invalidate_all()
+        self.assertFalse(
+            self.get_success(self.store.did_forget(self.alice, self.room_id))
+        )
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index ceec690285..8794401823 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -158,7 +158,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         # Check that alice's display name is now None
         self.assertEqual(row[0]["display_name"], None)

-    def test_room_is_locally_forgotten(self):
+    def test_room_is_locally_forgotten(self) -> None:
         """Test that when the last local user has forgotten a room it is known as forgotten."""
         # join two local and one remote user
         self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
@@ -199,7 +199,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
             self.get_success(self.store.is_locally_forgotten_room(self.room))
         )

-    def test_join_locally_forgotten_room(self):
+    def test_join_locally_forgotten_room(self) -> None:
         """Tests that if a user joins a forgotten room, the room is no longer forgotten."""
         self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
         self.assertFalse(
-- cgit 1.4.1

From
1eea73b4133da3afc2361592f7b2fa4a4125249d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 30 Aug 2022 06:08:29 -0500 Subject: Fix rate limit metrics registering twice and misreporting (#13649) * Fix rate limit metrics registering twice and misreporting Fix https://github.com/matrix-org/synapse/issues/13641 * Fix lints * Add changelog * Document `metrics_name=None`. --- changelog.d/13649.bugfix | 1 + synapse/server.py | 4 +- synapse/util/ratelimitutils.py | 155 ++++++++++++++++++++++++++++++++--------- 3 files changed, 128 insertions(+), 32 deletions(-) create mode 100644 changelog.d/13649.bugfix diff --git a/changelog.d/13649.bugfix b/changelog.d/13649.bugfix new file mode 100644 index 0000000000..e6513a585a --- /dev/null +++ b/changelog.d/13649.bugfix @@ -0,0 +1 @@ +Fix rate limit gauge metrics registering twice and misreporting (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). diff --git a/synapse/server.py b/synapse/server.py index 181984a1a4..c2e55bf0b1 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -756,7 +756,9 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_federation_ratelimiter(self) -> FederationRateLimiter: return FederationRateLimiter( - self.get_clock(), config=self.config.ratelimiting.rc_federation + self.get_clock(), + config=self.config.ratelimiting.rc_federation, + metrics_name="federation_servlets", ) @cache_in_self diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index f678b52cb4..9f64fed0d7 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -15,10 +15,23 @@ import collections import contextlib import logging +import threading import typing -from typing import Any, DefaultDict, Iterator, List, Set +from typing import ( + Any, + Callable, + DefaultDict, + Dict, + Iterator, + List, + Mapping, + Optional, + Set, + Tuple, +) from prometheus_client.core import Counter +from typing_extensions import ContextManager from twisted.internet import defer @@ -40,12 +53,20 @@ logger = logging.getLogger(__name__) # Track how much the ratelimiter is affecting requests -rate_limit_sleep_counter = Counter("synapse_rate_limit_sleep", "") -rate_limit_reject_counter = Counter("synapse_rate_limit_reject", "") +rate_limit_sleep_counter = Counter( + "synapse_rate_limit_sleep", + "Number of requests slept by the rate limiter", + ["rate_limiter_name"], +) +rate_limit_reject_counter = Counter( + "synapse_rate_limit_reject", + "Number of requests rejected by the rate limiter", + ["rate_limiter_name"], +) queue_wait_timer = Histogram( "synapse_rate_limit_queue_wait_time_seconds", - "sec", - [], + "Amount of time spent waiting for the rate limiter to let our request through.", + ["rate_limiter_name"], buckets=( 0.005, 0.01, @@ -65,35 +86,92 @@ queue_wait_timer = Histogram( ) +_rate_limiter_instances: Set["FederationRateLimiter"] = set() +# Protects the _rate_limiter_instances set from concurrent access +_rate_limiter_instances_lock = threading.Lock() + + +def _get_counts_from_rate_limiter_instance( + count_func: Callable[["FederationRateLimiter"], int] +) -> Mapping[Tuple[str, ...], int]: + """Returns a count of something (slept/rejected hosts) by (metrics_name)""" + # Cast to a list to prevent it changing while the Prometheus + # thread is collecting metrics + with _rate_limiter_instances_lock: + rate_limiter_instances = list(_rate_limiter_instances) + + # Map from (metrics_name,) -> int, the number of something like slept hosts + # or rejected hosts. 
The key type is Tuple[str], but we leave the length + # unspecified for compatability with LaterGauge's annotations. + counts: Dict[Tuple[str, ...], int] = {} + for rate_limiter_instance in rate_limiter_instances: + # Only track metrics if they provided a `metrics_name` to + # differentiate this instance of the rate limiter. + if rate_limiter_instance.metrics_name: + key = (rate_limiter_instance.metrics_name,) + counts[key] = count_func(rate_limiter_instance) + + return counts + + +# We track the number of affected hosts per time-period so we can +# differentiate one really noisy homeserver from a general +# ratelimit tuning problem across the federation. +LaterGauge( + "synapse_rate_limit_sleep_affected_hosts", + "Number of hosts that had requests put to sleep", + ["rate_limiter_name"], + lambda: _get_counts_from_rate_limiter_instance( + lambda rate_limiter_instance: sum( + ratelimiter.should_sleep() + for ratelimiter in rate_limiter_instance.ratelimiters.values() + ) + ), +) +LaterGauge( + "synapse_rate_limit_reject_affected_hosts", + "Number of hosts that had requests rejected", + ["rate_limiter_name"], + lambda: _get_counts_from_rate_limiter_instance( + lambda rate_limiter_instance: sum( + ratelimiter.should_reject() + for ratelimiter in rate_limiter_instance.ratelimiters.values() + ) + ), +) + + class FederationRateLimiter: - def __init__(self, clock: Clock, config: FederationRatelimitSettings): + """Used to rate limit request per-host.""" + + def __init__( + self, + clock: Clock, + config: FederationRatelimitSettings, + metrics_name: Optional[str] = None, + ): + """ + Args: + clock + config + metrics_name: The name of the rate limiter so we can differentiate it + from the rest in the metrics. If `None`, we don't track metrics + for this rate limiter. + + """ + self.metrics_name = metrics_name + def new_limiter() -> "_PerHostRatelimiter": - return _PerHostRatelimiter(clock=clock, config=config) + return _PerHostRatelimiter( + clock=clock, config=config, metrics_name=metrics_name + ) self.ratelimiters: DefaultDict[ str, "_PerHostRatelimiter" ] = collections.defaultdict(new_limiter) - # We track the number of affected hosts per time-period so we can - # differentiate one really noisy homeserver from a general - # ratelimit tuning problem across the federation. - LaterGauge( - "synapse_rate_limit_sleep_affected_hosts", - "Number of hosts that had requests put to sleep", - [], - lambda: sum( - ratelimiter.should_sleep() for ratelimiter in self.ratelimiters.values() - ), - ) - LaterGauge( - "synapse_rate_limit_reject_affected_hosts", - "Number of hosts that had requests rejected", - [], - lambda: sum( - ratelimiter.should_reject() - for ratelimiter in self.ratelimiters.values() - ), - ) + with _rate_limiter_instances_lock: + _rate_limiter_instances.add(self) def ratelimit(self, host: str) -> "_GeneratorContextManager[defer.Deferred[None]]": """Used to ratelimit an incoming request from a given host @@ -114,13 +192,23 @@ class FederationRateLimiter: class _PerHostRatelimiter: - def __init__(self, clock: Clock, config: FederationRatelimitSettings): + def __init__( + self, + clock: Clock, + config: FederationRatelimitSettings, + metrics_name: Optional[str] = None, + ): """ Args: clock config + metrics_name: The name of the rate limiter so we can differentiate it + from the rest in the metrics. If `None`, we don't track metrics + for this rate limiter. 
        """
         self.clock = clock
+        self.metrics_name = metrics_name
         self.window_size = config.window_size
         self.sleep_limit = config.sleep_limit
@@ -178,7 +266,10 @@ class _PerHostRatelimiter:
         return len(self.request_times) > self.sleep_limit

     async def _on_enter_with_tracing(self, request_id: object) -> None:
-        with start_active_span("ratelimit wait"), queue_wait_timer.time():
+        maybe_metrics_cm: ContextManager = contextlib.nullcontext()
+        if self.metrics_name:
+            maybe_metrics_cm = queue_wait_timer.labels(self.metrics_name).time()
+        with start_active_span("ratelimit wait"), maybe_metrics_cm:
             await self._on_enter(request_id)

     def _on_enter(self, request_id: object) -> "defer.Deferred[None]":
@@ -193,7 +284,8 @@ class _PerHostRatelimiter:
         # sleeping or in the ready queue).
         if self.should_reject():
             logger.debug("Ratelimiter(%s): rejecting request", self.host)
-            rate_limit_reject_counter.inc()
+            if self.metrics_name:
+                rate_limit_reject_counter.labels(self.metrics_name).inc()
             raise LimitExceededError(
                 retry_after_ms=int(self.window_size / self.sleep_limit)
             )
@@ -228,7 +320,8 @@ class _PerHostRatelimiter:
                 id(request_id),
                 self.sleep_sec,
             )
-            rate_limit_sleep_counter.inc()
+            if self.metrics_name:
+                rate_limit_sleep_counter.labels(self.metrics_name).inc()

             ret_defer = run_in_background(self.clock.sleep, self.sleep_sec)

             self.sleeping_requests.add(request_id)
-- cgit 1.4.1

From 20df96a7a7e3d676b0beae12d0eeb1f1d668247e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 30 Aug 2022 07:12:48 -0400
Subject: Speed up inserting `event_push_actions_staging`. (#13634)

By using `execute_values` instead of `execute_batch`.
---
 changelog.d/13634.feature                          |  1 +
 .../storage/databases/main/event_push_actions.py   | 28 +++++++---------------
 2 files changed, 9 insertions(+), 20 deletions(-)
 create mode 100644 changelog.d/13634.feature

diff --git a/changelog.d/13634.feature b/changelog.d/13634.feature
new file mode 100644
index 0000000000..0a8827205d
--- /dev/null
+++ b/changelog.d/13634.feature
@@ -0,0 +1 @@
+Improve performance of sending messages in rooms with thousands of local users.
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 8dfa545c27..9f410d69de 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -700,26 +700,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                 int(count_as_unread),  # unread column
             )

-        def _add_push_actions_to_staging_txn(txn: LoggingTransaction) -> None:
-            # We don't use simple_insert_many here to avoid the overhead
-            # of generating lists of dicts.
-
-            sql = """
-                INSERT INTO event_push_actions_staging
-                    (event_id, user_id, actions, notif, highlight, unread)
-                VALUES (?, ?, ?, ?, ?, ?)
- """ - - txn.execute_batch( - sql, - ( - _gen_entry(user_id, actions) - for user_id, actions in user_id_actions.items() - ), - ) - - return await self.db_pool.runInteraction( - "add_push_actions_to_staging", _add_push_actions_to_staging_txn + await self.db_pool.simple_insert_many( + "event_push_actions_staging", + keys=("event_id", "user_id", "actions", "notif", "highlight", "unread"), + values=[ + _gen_entry(user_id, actions) + for user_id, actions in user_id_actions.items() + ], + desc="add_push_actions_to_staging", ) async def remove_push_actions_from_staging(self, event_id: str) -> None: -- cgit 1.4.1 From 303b40b988bc372a81fc1a3cf62d3f5d074970ff Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 30 Aug 2022 07:15:54 -0400 Subject: Do not wait for background updates to complete do expire URL cache. (#13657) Media downloaded as part of a URL preview is normally deleted after two days. However, while a background database migration is running, the process is stopped. A long-running database migration can therefore cause the media store to fill up with old preview files. This logic was added in #2697 to make sure that we didn't try to run the expiry without an index on `local_media_repository.created_ts`; the original logic that needs that index was added in #2478 (in `get_url_cache_media_before`, as amended by 93247a424a5068b088567fa98b6990e47608b7cb), and is still present. Given that the background update was added before Synapse v1.0.0, just drop this check and assume the index exists. --- changelog.d/13657.bugfix | 1 + synapse/rest/media/v1/preview_url_resource.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/13657.bugfix diff --git a/changelog.d/13657.bugfix b/changelog.d/13657.bugfix new file mode 100644 index 0000000000..d314d9c52f --- /dev/null +++ b/changelog.d/13657.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index b36c98a08e..a8f6fd6b35 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -732,10 +732,6 @@ class PreviewUrlResource(DirectServeJsonResource): logger.debug("Running url preview cache expiry") - if not (await self.store.db_pool.updates.has_completed_background_updates()): - logger.debug("Still running DB updates; skipping url preview cache expiry") - return - def try_remove_parent_dirs(dirs: Iterable[str]) -> None: """Attempt to remove the given chain of parent directories -- cgit 1.4.1 From 1c26acd815a8609314991e539dd99ceb2b9b1b43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Aug 2022 12:17:48 +0100 Subject: Fix bug where we wedge media plugins if clients disconnect early (#13660) We incorrectly didn't use the returned `Responder` if the client had disconnected, which meant that the resource used by the Responder wasn't correctly released. In particular, this exhausted the thread pools so that *all* requests timed out. 
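In essence, the fix is to enter the `Responder`'s context manager unconditionally, so that its cleanup runs on every code path — including the early return for an already-disconnected client. A minimal, self-contained sketch of the pattern (the `FakeResponder` class below is invented for illustration; it stands in for the real `Responder` used in `synapse/rest/media/v1/_base.py`):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


class FakeResponder:
    """Stand-in for a media Responder that owns a scarce resource
    (e.g. a thread-pool slot); invented here for illustration."""

    def __enter__(self) -> "FakeResponder":
        logger.debug("resource acquired")
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        # Runs on *every* exit path, including early returns.
        logger.debug("resource released")


def respond(responder: FakeResponder, disconnected: bool) -> None:
    # Enter the context manager *before* checking for disconnection, so
    # that __exit__ (and hence the resource release) always runs.
    with responder:
        if disconnected:
            logger.warning("client already disconnected; not responding")
            return  # the resource is still released by __exit__
        logger.debug("writing media to the consumer")


respond(FakeResponder(), disconnected=True)
```

Running the sketch with `disconnected=True` still logs "resource released", which is exactly the guarantee the patch below restores.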
--- changelog.d/13660.bugfix | 1 + synapse/rest/media/v1/_base.py | 40 +++++++++++++++++++++------------------- 2 files changed, 22 insertions(+), 19 deletions(-) create mode 100644 changelog.d/13660.bugfix diff --git a/changelog.d/13660.bugfix b/changelog.d/13660.bugfix new file mode 100644 index 0000000000..43859a4d65 --- /dev/null +++ b/changelog.d/13660.bugfix @@ -0,0 +1 @@ +Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index c35d42fab8..d30878f704 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -254,30 +254,32 @@ async def respond_with_responder( file_size: Size in bytes of the media. If not known it should be None upload_name: The name of the requested file, if any. """ - if request._disconnected: - logger.warning( - "Not sending response to request %s, already disconnected.", request - ) - return - if not responder: respond_404(request) return - logger.debug("Responding to media request with responder %s", responder) - add_file_headers(request, media_type, file_size, upload_name) - try: - with responder: + # If we have a responder we *must* use it as a context manager. + with responder: + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return + + logger.debug("Responding to media request with responder %s", responder) + add_file_headers(request, media_type, file_size, upload_name) + try: + await responder.write_to_consumer(request) - except Exception as e: - # The majority of the time this will be due to the client having gone - # away. Unfortunately, Twisted simply throws a generic exception at us - # in that case. - logger.warning("Failed to write to consumer: %s %s", type(e), e) - - # Unregister the producer, if it has one, so Twisted doesn't complain - if request.producer: - request.unregisterProducer() + except Exception as e: + # The majority of the time this will be due to the client having gone + # away. Unfortunately, Twisted simply throws a generic exception at us + # in that case. + logger.warning("Failed to write to consumer: %s %s", type(e), e) + + # Unregister the producer, if it has one, so Twisted doesn't complain + if request.producer: + request.unregisterProducer() finish_request(request) -- cgit 1.4.1 From 8f6aa015a80d1e34a25d100c5fd6ec4dde1fb728 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 Aug 2022 12:25:44 +0100 Subject: 1.66.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/13649.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/13649.bugfix diff --git a/CHANGES.md b/CHANGES.md index 14fafc260d..8790812f8e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.66.0rc2 (2022-08-30) +============================== + +Bugfixes +-------- + +- Fix rate limit gauge metrics registering twice and misreporting (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). 
([\#13649](https://github.com/matrix-org/synapse/issues/13649)) + + Synapse 1.66.0rc1 (2022-08-23) ============================== diff --git a/changelog.d/13649.bugfix b/changelog.d/13649.bugfix deleted file mode 100644 index e6513a585a..0000000000 --- a/changelog.d/13649.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rate limit gauge metrics registering twice and misreporting (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). diff --git a/debian/changelog b/debian/changelog index c3974261a9..b42c99d81a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium + + * New Synapse release 1.66.0rc2. + + -- Synapse Packaging team Tue, 30 Aug 2022 12:25:19 +0100 + matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium * New Synapse release 1.66.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 745b6067aa..7146897837 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.66.0rc1" +version = "1.66.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From e761e8b475e26341d6d26ecc1499233c5f57c7ec Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 30 Aug 2022 08:21:19 -0400 Subject: Clarify documentation about replication traffic. (#13656) It can be authenticated with the worker_replication_secret setting, but is always unencrypted. --- changelog.d/13656.doc | 1 + docs/workers.md | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13656.doc diff --git a/changelog.d/13656.doc b/changelog.d/13656.doc new file mode 100644 index 0000000000..61013a0daf --- /dev/null +++ b/changelog.d/13656.doc @@ -0,0 +1 @@ +Clarify documentation that HTTP replication traffic can be protected with a shared secret. diff --git a/docs/workers.md b/docs/workers.md index 6969c424d8..dce584972d 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -120,7 +120,10 @@ redis: See the sample config for the full documentation of each option. Under **no circumstances** should the replication listener be exposed to the -public internet; it has no authentication and is unencrypted. +public internet; replication traffic is: + +* always unencrypted +* unauthenticated, unless `worker_replication_secret` is configured ### Worker configuration -- cgit 1.4.1 From 31f2a3fbc3f46b88cb400f69cd59f3551c1401f5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 Aug 2022 14:19:52 +0100 Subject: Update changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8790812f8e..a39fe661ce 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.66.0rc2 (2022-08-30) Bugfixes -------- -- Fix rate limit gauge metrics registering twice and misreporting (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649)) +- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). 
([\#13649](https://github.com/matrix-org/synapse/issues/13649)) Synapse 1.66.0rc1 (2022-08-23) -- cgit 1.4.1 From 372136d3a8c6cd0d7506ae75b18128476ba368e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 30 Aug 2022 18:01:51 +0100 Subject: Remove documentation of legacy `frontend_proxy` worker app (#13645) This has been the same as a generic_worker since #6964, so let's get rid of it. Fixes #3717 --- changelog.d/13645.doc | 1 + .../workers/generic_worker.yaml | 2 + docs/workers.md | 61 +++++++--------------- 3 files changed, 23 insertions(+), 41 deletions(-) create mode 100644 changelog.d/13645.doc diff --git a/changelog.d/13645.doc b/changelog.d/13645.doc new file mode 100644 index 0000000000..04c302ec2c --- /dev/null +++ b/changelog.d/13645.doc @@ -0,0 +1 @@ +Remove documentation of legacy `frontend_proxy` worker app. diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml index a82f9c161f..6e7b60886e 100644 --- a/docs/systemd-with-workers/workers/generic_worker.yaml +++ b/docs/systemd-with-workers/workers/generic_worker.yaml @@ -5,6 +5,8 @@ worker_name: generic_worker1 worker_replication_host: 127.0.0.1 worker_replication_http_port: 9093 +worker_main_http_uri: http://localhost:8008/ + worker_listeners: - type: http port: 8083 diff --git a/docs/workers.md b/docs/workers.md index dce584972d..176bb1475e 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -117,7 +117,7 @@ redis: enabled: true ``` -See the sample config for the full documentation of each option. +See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option. Under **no circumstances** should the replication listener be exposed to the public internet; replication traffic is: @@ -128,15 +128,15 @@ public internet; replication traffic is: ### Worker configuration -In the config file for each worker, you must specify the type of worker -application (`worker_app`), and you should specify a unique name for the worker -(`worker_name`). The currently available worker applications are listed below. -You must also specify the HTTP replication endpoint that it should talk to on -the main synapse process. `worker_replication_host` should specify the host of -the main synapse and `worker_replication_http_port` should point to the HTTP -replication port. If the worker will handle HTTP requests then the -`worker_listeners` option should be set with a `http` listener, in the same way -as the `listeners` option in the shared config. +In the config file for each worker, you must specify: + * The type of worker (`worker_app`). The currently available worker applications are listed below. + * A unique name for the worker (`worker_name`). + * The HTTP replication endpoint that it should talk to on the main synapse process + (`worker_replication_host` and `worker_replication_http_port`) + * If handling HTTP requests, a `worker_listeners` option with an `http` + listener, in the same way as the `listeners` option in the shared config. + * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for + the main process (`worker_main_http_uri`). For example: @@ -220,10 +220,12 @@ information. 
^/_matrix/client/(api/v1|r0|v3|unstable)/search$ # Encryption requests + # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri` ^/_matrix/client/(r0|v3|unstable)/keys/query$ ^/_matrix/client/(r0|v3|unstable)/keys/changes$ ^/_matrix/client/(r0|v3|unstable)/keys/claim$ ^/_matrix/client/(r0|v3|unstable)/room_keys/ + ^/_matrix/client/(r0|v3|unstable)/keys/upload/ # Registration/login requests ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ @@ -584,39 +586,16 @@ handle it, and are online. If `update_user_directory` is set to `false`, and this worker is not running, the above endpoint may give outdated results. -### `synapse.app.frontend_proxy` - -Proxies some frequently-requested client endpoints to add caching and remove -load from the main synapse. It can handle REST endpoints matching the following -regular expressions: - - ^/_matrix/client/(r0|v3|unstable)/keys/upload - -If `use_presence` is False in the homeserver config, it can also handle REST -endpoints matching the following regular expressions: - - ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status - -This "stub" presence handler will pass through `GET` request but make the -`PUT` effectively a no-op. - -It will proxy any requests it cannot handle to the main synapse instance. It -must therefore be configured with the location of the main instance, via -the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration -file. For example: - -```yaml -worker_main_http_uri: http://127.0.0.1:8008 -``` - ### Historical apps -*Note:* Historically there used to be more apps, however they have been -amalgamated into a single `synapse.app.generic_worker` app. The remaining apps -are ones that do specific processing unrelated to requests, e.g. the `pusher` -that handles sending out push notifications for new events. The intention is for -all these to be folded into the `generic_worker` app and to use config to define -which processes handle the various proccessing such as push notifications. +The following used to be separate worker application types, but are now +equivalent to `synapse.app.generic_worker`: + + * `synapse.app.client_reader` + * `synapse.app.event_creator` + * `synapse.app.federation_reader` + * `synapse.app.frontend_proxy` + * `synapse.app.synchrotron` ## Migration from old config -- cgit 1.4.1 From 20c76cecb9eb84dadfa7b2d25b436d3ab9218a1a Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 30 Aug 2022 10:29:16 -0700 Subject: Drop unused column `application_services_state.last_txn` (#13627) --- changelog.d/13627.misc | 1 + synapse/storage/schema/__init__.py | 1 + ...pplication_services_state_last_txn.sql.postgres | 17 +++++++++ ..._application_services_state_last_txn.sql.sqlite | 40 ++++++++++++++++++++++ 4 files changed, 59 insertions(+) create mode 100644 changelog.d/13627.misc create mode 100644 synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres create mode 100644 synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite diff --git a/changelog.d/13627.misc b/changelog.d/13627.misc new file mode 100644 index 0000000000..1a15709aff --- /dev/null +++ b/changelog.d/13627.misc @@ -0,0 +1 @@ +Drop unused column `application_services_state.last_txn`. 
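A note on the migration below: Postgres can drop the column directly, but SQLite only gained `ALTER TABLE ... DROP COLUMN` in version 3.35, so the SQLite delta instead rebuilds the table — create a replacement without the column, copy the surviving columns across, drop the original, and rename. A minimal sketch of that rebuild pattern, using Python's built-in `sqlite3` and a hypothetical table:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE demo (as_id TEXT PRIMARY KEY, state TEXT, last_txn INTEGER);
    INSERT INTO demo VALUES ('bridge', 'up', 42);

    -- Rebuild-to-drop: create a new table without the unwanted column...
    CREATE TABLE demo2 (as_id TEXT PRIMARY KEY, state TEXT);
    -- ...copy across everything we want to keep...
    INSERT INTO demo2 (as_id, state) SELECT as_id, state FROM demo;
    -- ...then swap the tables.
    DROP TABLE demo;
    ALTER TABLE demo2 RENAME TO demo;
    """
)
print(conn.execute("SELECT * FROM demo").fetchall())  # [('bridge', 'up')]
```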
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index a9a88c8bfd..256f745dc0 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -75,6 +75,7 @@ Changes in SCHEMA_VERSION = 71: Changes in SCHEMA_VERSION = 72: - event_edges.(room_id, is_state) are no longer written to. - Tables related to groups are dropped. + - Unused column application_services_state.last_txn is dropped """ diff --git a/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres new file mode 100644 index 0000000000..13d47de9e6 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres @@ -0,0 +1,17 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Drop unused column application_services_state.last_txn +ALTER table application_services_state DROP COLUMN last_txn; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite new file mode 100644 index 0000000000..3be1c88d72 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite @@ -0,0 +1,40 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Drop unused column application_services_state.last_txn + +CREATE TABLE application_services_state2 ( + as_id TEXT PRIMARY KEY NOT NULL, + state VARCHAR(5), + read_receipt_stream_id BIGINT, + presence_stream_id BIGINT, + to_device_stream_id BIGINT, + device_list_stream_id BIGINT +); + + +INSERT INTO application_services_state2 ( + as_id, + state, + read_receipt_stream_id, + presence_stream_id, + to_device_stream_id, + device_list_stream_id +) +SELECT as_id, state, read_receipt_stream_id, presence_stream_id, to_device_stream_id, device_list_stream_id +FROM application_services_state; + +DROP TABLE application_services_state; +ALTER TABLE application_services_state2 RENAME TO application_services_state; -- cgit 1.4.1 From 92c5817e34cb421dbe2c0df275238e84866932a8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 30 Aug 2022 14:50:06 -0500 Subject: Give the correct next event when the message timestamps are the same - MSC3030 (#13658) Discovered while working on https://github.com/matrix-org/synapse/pull/13589 and I had all the messages at the same timestamp in the tests. Part of https://github.com/matrix-org/matrix-spec-proposals/pull/3030 Complement tests: https://github.com/matrix-org/complement/pull/457 --- changelog.d/13658.bugfix | 1 + synapse/storage/databases/main/events_worker.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13658.bugfix diff --git a/changelog.d/13658.bugfix b/changelog.d/13658.bugfix new file mode 100644 index 0000000000..8740f066bb --- /dev/null +++ b/changelog.d/13658.bugfix @@ -0,0 +1 @@ +Fix MSC3030 `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 8a7cdb024d..9b997c304d 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -2111,7 +2111,14 @@ class EventsWorkerStore(SQLBaseStore): AND room_id = ? /* Make sure event is not rejected */ AND rejections.event_id IS NULL - ORDER BY origin_server_ts %s + /** + * First sort by the message timestamp. If the message timestamps are the + * same, we want the message that logically comes "next" (before/after + * the given timestamp) based on the DAG and its topological order (`depth`). + * Finally, we can tie-break based on when it was received on the server + * (`stream_ordering`). + */ + ORDER BY origin_server_ts %s, depth %s, stream_ordering %s LIMIT 1; """ @@ -2130,7 +2137,8 @@ class EventsWorkerStore(SQLBaseStore): order = "ASC" txn.execute( - sql_template % (comparison_operator, order), (timestamp, room_id) + sql_template % (comparison_operator, order, order, order), + (timestamp, room_id), ) row = txn.fetchone() if row: -- cgit 1.4.1 From 61b37ddd37dbe6c54576a97be7f85ad9735252c0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 31 Aug 2022 05:43:00 -0400 Subject: Remind people that direct TCP replication is disabled. (#13674) --- CHANGES.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index a39fe661ce..0f822556ec 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,11 @@ +Synapse 1.66.0 +============== + +Deployments with multiple workers should note that the direct TCP replication +configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse +v1.67.0. 
See [docs/workers.md](https://github.com/matrix-org/synapse/blob/release-v1.18.0/docs/workers.md) +for more details. + Synapse 1.66.0rc2 (2022-08-30) ============================== -- cgit 1.4.1 From b9924df264884634092465b9d2edac3393768f2e Mon Sep 17 00:00:00 2001 From: Jörg Behrmann Date: Wed, 31 Aug 2022 12:15:28 +0200 Subject: Change dpkg-statoverride to use --force-statoverride-add (#13638) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The --force flag of dpkg-statoverride has been deprecated (apparently starting with the dpkg version in Debian buster). It offers --force-all as q quick fix, but the usage in the Debian postinst script is probably covered by --force-statoverride-add. Fixes: #8391 Signed-off-by: Jörg Behrmann --- debian/changelog | 1 + debian/matrix-synapse-py3.postinst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 0709ae24e9..6a6f306019 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,7 @@ matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium * Update debhelper to compatibility level 12. * Drop the preinst script stopping synapse. * Allocate a group for the system user. + * Change dpkg-statoverride to --force-statoverride-add. -- Jörg Behrmann Tue, 23 Aug 2022 17:17:00 +0100 diff --git a/debian/matrix-synapse-py3.postinst b/debian/matrix-synapse-py3.postinst index 3c72b69bb7..acab0877ad 100644 --- a/debian/matrix-synapse-py3.postinst +++ b/debian/matrix-synapse-py3.postinst @@ -45,7 +45,7 @@ EOF for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then - dpkg-statoverride --force --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR + dpkg-statoverride --force-statoverride-add --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR fi done -- cgit 1.4.1 From d48b70fd37888346fc6a4c800ecfaa6a2395d087 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 11:18:56 +0100 Subject: Update changelog for v1.62.0 --- CHANGES.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 0f822556ec..9ba9498344 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -398,6 +398,20 @@ No significant changes since 1.62.0rc3. Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse. +## Security advisory + +The following issue is fixed in 1.62.0. + +* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152) + + Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers. + + Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected. + + Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher. 
+ + Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088). + Synapse 1.62.0rc3 (2022-07-04) ============================== -- cgit 1.4.1 From ef88bc0775b7c01dc3abfbfca3e8aaa566b1871d Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 11:21:09 +0100 Subject: 1.66.0 --- CHANGES.md | 7 +++++-- debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9ba9498344..c890f64d25 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,11 +1,14 @@ -Synapse 1.66.0 -============== +Synapse 1.66.0 (2022-08-31) +=========================== + +No significant changes since 1.66.0rc2. Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse v1.67.0. See [docs/workers.md](https://github.com/matrix-org/synapse/blob/release-v1.18.0/docs/workers.md) for more details. + Synapse 1.66.0rc2 (2022-08-30) ============================== diff --git a/debian/changelog b/debian/changelog index b42c99d81a..7ff1428842 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.66.0) stable; urgency=medium + + * New Synapse release 1.66.0. + + -- Synapse Packaging team Wed, 31 Aug 2022 11:20:17 +0100 + matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium * New Synapse release 1.66.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 7146897837..a41d88ea74 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.66.0rc2" +version = "1.66.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 5634267d33fd55131dfb0789e5510247a0b6e8f2 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 11:31:51 +0100 Subject: Update changelog to link to the Synapse docs instead of markdown --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c890f64d25..f502866f76 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ No significant changes since 1.66.0rc2. Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse -v1.67.0. See [docs/workers.md](https://github.com/matrix-org/synapse/blob/release-v1.18.0/docs/workers.md) +v1.67.0. See the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details. 
-- cgit 1.4.1 From a160406d245cb84b48fa67fe3ee73f0cffceb495 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 31 Aug 2022 11:38:16 +0100 Subject: Fix admin List Room API return type on sqlite (#13509) --- changelog.d/13509.bugfix | 1 + synapse/storage/databases/main/room.py | 6 ++++-- tests/rest/admin/test_room.py | 19 ++++++++++++++----- 3 files changed, 19 insertions(+), 7 deletions(-) create mode 100644 changelog.d/13509.bugfix diff --git a/changelog.d/13509.bugfix b/changelog.d/13509.bugfix new file mode 100644 index 0000000000..6dcb9741d9 --- /dev/null +++ b/changelog.d/13509.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index b7d4baa6bb..367424b4a8 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -641,8 +641,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): "version": room[5], "creator": room[6], "encryption": room[7], - "federatable": room[8], - "public": room[9], + # room_stats_state.federatable is an integer on sqlite. + "federatable": bool(room[8]), + # rooms.is_public is an integer on sqlite. + "public": bool(room[9]), "join_rules": room[10], "guest_access": room[11], "history_visibility": room[12], diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index fd6da557c1..9d71a97524 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1080,7 +1080,9 @@ class RoomTestCase(unittest.HomeserverTestCase): room_ids = [] for _ in range(total_rooms): room_id = self.helper.create_room_as( - self.admin_user, tok=self.admin_user_tok + self.admin_user, + tok=self.admin_user_tok, + is_public=True, ) room_ids.append(room_id) @@ -1119,8 +1121,8 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertIn("version", r) self.assertIn("creator", r) self.assertIn("encryption", r) - self.assertIn("federatable", r) - self.assertIn("public", r) + self.assertIs(r["federatable"], True) + self.assertIs(r["public"], True) self.assertIn("join_rules", r) self.assertIn("guest_access", r) self.assertIn("history_visibility", r) @@ -1587,8 +1589,12 @@ class RoomTestCase(unittest.HomeserverTestCase): def test_single_room(self) -> None: """Test that a single room can be requested correctly""" # Create two test rooms - room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) - room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + room_id_1 = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok, is_public=True + ) + room_id_2 = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok, is_public=False + ) room_name_1 = "something" room_name_2 = "else" @@ -1634,7 +1640,10 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertIn("state_events", channel.json_body) self.assertIn("room_type", channel.json_body) self.assertIn("forgotten", channel.json_body) + self.assertEqual(room_id_1, channel.json_body["room_id"]) + self.assertIs(True, channel.json_body["federatable"]) + self.assertIs(True, channel.json_body["public"]) def test_single_room_devices(self) -> None: """Test that `joined_local_devices` can be requested correctly""" -- cgit 1.4.1 From 
90c99fb3aab2d371039cf4aaa61305928e77230d Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 11:53:30 +0100 Subject: Fix dead link in 1.18.0 upgrade notes --- docs/upgrade.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/upgrade.md b/docs/upgrade.md index 0ab5bfeaf0..51719f8c73 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -1200,7 +1200,7 @@ updated. When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.** See -[workers](workers.md) for more details. +the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details. # Upgrading to v1.14.0 -- cgit 1.4.1 From 7bc110a19e6de0572b0c9513726d13298b45ced2 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 31 Aug 2022 11:16:05 +0000 Subject: Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. (#13662) --- changelog.d/13662.misc | 1 + synapse/federation/transport/server/_base.py | 5 +- synapse/http/server.py | 68 ++----------------------- synapse/replication/http/_base.py | 7 +-- synapse/rest/client/room.py | 3 +- synapse/util/cancellation.py | 56 ++++++++++++++++++++ tests/federation/transport/server/test__base.py | 3 +- tests/http/test_servlet.py | 2 +- tests/replication/http/test__base.py | 3 +- tests/test_server.py | 2 +- 10 files changed, 75 insertions(+), 75 deletions(-) create mode 100644 changelog.d/13662.misc create mode 100644 synapse/util/cancellation.py diff --git a/changelog.d/13662.misc b/changelog.d/13662.misc new file mode 100644 index 0000000000..3dea4a1c2c --- /dev/null +++ b/changelog.d/13662.misc @@ -0,0 +1 @@ +Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. \ No newline at end of file diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index bb0f8d6b7b..1db8009d6c 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.urls import FEDERATION_V1_PREFIX -from synapse.http.server import HttpServer, ServletCallback, is_method_cancellable +from synapse.http.server import HttpServer, ServletCallback from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.logging.context import run_in_background @@ -34,6 +34,7 @@ from synapse.logging.opentracing import ( whitelisted_homeserver, ) from synapse.types import JsonDict +from synapse.util.cancellation import is_function_cancellable from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import parse_and_validate_server_name @@ -375,7 +376,7 @@ class BaseFederationServlet: if code is None: continue - if is_method_cancellable(code): + if is_function_cancellable(code): # The wrapper added by `self._wrap` will inherit the cancellable flag, # but the wrapper itself does not support cancellation yet. 
# Once resolved, the cancellation tests in diff --git a/synapse/http/server.py b/synapse/http/server.py index 19f42159b8..6068a94b40 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -33,7 +33,6 @@ from typing import ( Optional, Pattern, Tuple, - TypeVar, Union, ) @@ -64,6 +63,7 @@ from synapse.logging.context import defer_to_thread, preserve_fn, run_in_backgro from synapse.logging.opentracing import active_span, start_active_span, trace_servlet from synapse.util import json_encoder from synapse.util.caches import intern_dict +from synapse.util.cancellation import is_function_cancellable from synapse.util.iterutils import chunk_seq if TYPE_CHECKING: @@ -94,68 +94,6 @@ HTML_ERROR_TEMPLATE = """ HTTP_STATUS_REQUEST_CANCELLED = 499 -F = TypeVar("F", bound=Callable[..., Any]) - - -_cancellable_method_names = frozenset( - { - # `RestServlet`, `BaseFederationServlet` and `BaseFederationServerServlet` - # methods - "on_GET", - "on_PUT", - "on_POST", - "on_DELETE", - # `_AsyncResource`, `DirectServeHtmlResource` and `DirectServeJsonResource` - # methods - "_async_render_GET", - "_async_render_PUT", - "_async_render_POST", - "_async_render_DELETE", - "_async_render_OPTIONS", - # `ReplicationEndpoint` methods - "_handle_request", - } -) - - -def cancellable(method: F) -> F: - """Marks a servlet method as cancellable. - - Methods with this decorator will be cancelled if the client disconnects before we - finish processing the request. - - During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping - the method. The `cancel()` call will propagate down to the `Deferred` that is - currently being waited on. That `Deferred` will raise a `CancelledError`, which will - propagate up, as per normal exception handling. - - Before applying this decorator to a new endpoint, you MUST recursively check - that all `await`s in the function are on `async` functions or `Deferred`s that - handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from - premature logging context closure, to stuck requests, to database corruption. - - Usage: - class SomeServlet(RestServlet): - @cancellable - async def on_GET(self, request: SynapseRequest) -> ...: - ... - """ - if method.__name__ not in _cancellable_method_names and not any( - method.__name__.startswith(prefix) for prefix in _cancellable_method_names - ): - raise ValueError( - "@cancellable decorator can only be applied to servlet methods." 
- ) - - method.cancellable = True # type: ignore[attr-defined] - return method - - -def is_method_cancellable(method: Callable[..., Any]) -> bool: - """Checks whether a servlet method has the `@cancellable` flag.""" - return getattr(method, "cancellable", False) - - def return_json_error( f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig] ) -> None: @@ -389,7 +327,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): method_handler = getattr(self, "_async_render_%s" % (request_method,), None) if method_handler: - request.is_render_cancellable = is_method_cancellable(method_handler) + request.is_render_cancellable = is_function_cancellable(method_handler) raw_callback_return = method_handler(request) @@ -551,7 +489,7 @@ class JsonResource(DirectServeJsonResource): async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) - request.is_render_cancellable = is_method_cancellable(callback) + request.is_render_cancellable = is_function_cancellable(callback) # Make sure we have an appropriate name for this handler in prometheus # (rather than the default of JsonResource). diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 561ad5bf04..acb0bd18f7 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -26,12 +26,13 @@ from twisted.web.server import Request from synapse.api.errors import HttpResponseException, SynapseError from synapse.http import RequestTimedOutError -from synapse.http.server import HttpServer, is_method_cancellable +from synapse.http.server import HttpServer from synapse.http.site import SynapseRequest from synapse.logging import opentracing from synapse.logging.opentracing import trace_with_opname from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache +from synapse.util.cancellation import is_function_cancellable from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -311,7 +312,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): url_args = list(self.PATH_ARGS) method = self.METHOD - if self.CACHE and is_method_cancellable(self._handle_request): + if self.CACHE and is_function_cancellable(self._handle_request): raise Exception( f"{self.__class__.__name__} has been marked as cancellable, but CACHE " "is set. The cancellable flag would have no effect." @@ -359,6 +360,6 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): # The `@cancellable` decorator may be applied to `_handle_request`. But we # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`, # so we have to set up the cancellable flag ourselves. 
- request.is_render_cancellable = is_method_cancellable(self._handle_request) + request.is_render_cancellable = is_function_cancellable(self._handle_request) return await self._handle_request(request, **kwargs) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 3259de4802..0e2834008e 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -37,7 +37,7 @@ from synapse.api.errors import ( ) from synapse.api.filtering import Filter from synapse.events.utils import format_event_for_client_v2 -from synapse.http.server import HttpServer, cancellable +from synapse.http.server import HttpServer from synapse.http.servlet import ( ResolveRoomIdMixin, RestServlet, @@ -57,6 +57,7 @@ from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID from synapse.util import json_decoder +from synapse.util.cancellation import cancellable from synapse.util.stringutils import parse_and_validate_server_name, random_string if TYPE_CHECKING: diff --git a/synapse/util/cancellation.py b/synapse/util/cancellation.py new file mode 100644 index 0000000000..472d2e3aeb --- /dev/null +++ b/synapse/util/cancellation.py @@ -0,0 +1,56 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, TypeVar + +F = TypeVar("F", bound=Callable[..., Any]) + + +def cancellable(function: F) -> F: + """Marks a function as cancellable. + + Servlet methods with this decorator will be cancelled if the client disconnects before we + finish processing the request. + + Although this annotation is particularly useful for servlet methods, it's also + useful for intermediate functions, where it documents the fact that the function has + been audited for cancellation safety and needs to preserve that. + This then simplifies auditing new functions that call those same intermediate + functions. + + During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping + the method. The `cancel()` call will propagate down to the `Deferred` that is + currently being waited on. That `Deferred` will raise a `CancelledError`, which will + propagate up, as per normal exception handling. + + Before applying this decorator to a new function, you MUST recursively check + that all `await`s in the function are on `async` functions or `Deferred`s that + handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from + premature logging context closure, to stuck requests, to database corruption. + + See the documentation page on Cancellation for more information. + + Usage: + class SomeServlet(RestServlet): + @cancellable + async def on_GET(self, request: SynapseRequest) -> ...: + ... 
+ """ + + function.cancellable = True # type: ignore[attr-defined] + return function + + +def is_function_cancellable(function: Callable[..., Any]) -> bool: + """Checks whether a servlet method has the `@cancellable` flag.""" + return getattr(function, "cancellable", False) diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index d33e86db4c..e88e5d8bb3 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -18,9 +18,10 @@ from typing import Dict, List, Tuple from synapse.api.errors import Codes from synapse.federation.transport.server import BaseFederationServlet from synapse.federation.transport.server._base import Authenticator, _parse_auth_header -from synapse.http.server import JsonResource, cancellable +from synapse.http.server import JsonResource from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util.cancellation import cancellable from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index bb966c80c6..3cbca0f5a3 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -18,7 +18,6 @@ from typing import Tuple from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError -from synapse.http.server import cancellable from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, @@ -28,6 +27,7 @@ from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util.cancellation import cancellable from tests import unittest from tests.http.server._base import test_disconnect diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index 822a957c3a..936ab4504a 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -18,11 +18,12 @@ from typing import Tuple from twisted.web.server import Request from synapse.api.errors import Codes -from synapse.http.server import JsonResource, cancellable +from synapse.http.server import JsonResource from synapse.replication.http import REPLICATION_PREFIX from synapse.replication.http._base import ReplicationEndpoint from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util.cancellation import cancellable from tests import unittest from tests.http.server._base import test_disconnect diff --git a/tests/test_server.py b/tests/test_server.py index d2b2d8344a..23975d59c3 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -26,12 +26,12 @@ from synapse.http.server import ( DirectServeJsonResource, JsonResource, OptionsResource, - cancellable, ) from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import make_deferred_yieldable from synapse.types import JsonDict from synapse.util import Clock +from synapse.util.cancellation import cancellable from tests import unittest from tests.http.server._base import test_disconnect -- cgit 1.4.1 From 42b11d5565ed026c7d71f433c69e7b7007f45918 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Wed, 31 Aug 2022 12:19:39 +0100 Subject: Remove cached wrap on `_get_joined_users_from_context` method (#13569) The method doesn't actually do any data fetching and the method that does, `_get_joined_profile_from_event_id`, has its own cache. 
Signed off by Nick @ Beeper (@Fizzadar). --- changelog.d/13569.removal | 1 + synapse/state/__init__.py | 2 +- synapse/storage/databases/main/roommember.py | 122 +++++++++------------------ 3 files changed, 40 insertions(+), 85 deletions(-) create mode 100644 changelog.d/13569.removal diff --git a/changelog.d/13569.removal b/changelog.d/13569.removal new file mode 100644 index 0000000000..af9d407671 --- /dev/null +++ b/changelog.d/13569.removal @@ -0,0 +1 @@ +Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 3047e1b1ad..3787d35b24 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -232,7 +232,7 @@ class StateHandler: logger.debug("calling resolve_state_groups from get_current_user_ids_in_room") entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids) state = await entry.get_state(self._state_storage_controller, StateFilter.all()) - return await self.store.get_joined_user_ids_from_state(room_id, state, entry) + return await self.store.get_joined_user_ids_from_state(room_id, state) async def get_hosts_in_room_at_events( self, room_id: str, event_ids: Collection[str] diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 06500457bd..4f0adb136a 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -31,7 +31,6 @@ from typing import ( import attr from synapse.api.constants import EventTypes, Membership -from synapse.events import EventBase from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import ( run_as_background_process, @@ -883,96 +882,51 @@ class RoomMemberWorkerStore(EventsWorkerStore): return shared_room_ids or frozenset() async def get_joined_user_ids_from_state( - self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" + self, room_id: str, state: StateMap[str] ) -> Set[str]: - state_group: Union[object, int] = state_entry.state_group - if not state_group: - # If state_group is None it means it has yet to be assigned a - # state group, i.e. we need to make sure that calls with a state_group - # of None don't hit previous cached calls with a None state_group. - # To do this we set the state_group to a new object as object() != object() - state_group = object() - - assert state_group is not None - with Measure(self._clock, "get_joined_users_from_state"): - return await self._get_joined_user_ids_from_context( - room_id, state_group, state, context=state_entry - ) + """ + For a given set of state IDs, get a set of user IDs in the room. - @cached(num_args=2, iterable=True, max_entries=100000) - async def _get_joined_user_ids_from_context( - self, - room_id: str, - state_group: Union[object, int], - current_state_ids: StateMap[str], - event: Optional[EventBase] = None, - context: Optional["_StateCacheEntry"] = None, - ) -> Set[str]: - # We don't use `state_group`, it's there so that we can cache based - # on it. However, it's important that it's never None, since two current_states - # with a state_group of None are likely to be different. - assert state_group is not None + This method checks the local event cache, before calling + `_get_user_ids_from_membership_event_ids` for any uncached events. 
+ """ - users_in_room = set() - member_event_ids = [ - e_id - for key, e_id in current_state_ids.items() - if key[0] == EventTypes.Member - ] - - if context is not None: - # If we have a context with a delta from a previous state group, - # check if we also have the result from the previous group in cache. - # If we do then we can reuse that result and simply update it with - # any membership changes in `delta_ids` - if context.prev_group and context.delta_ids: - prev_res = self._get_joined_user_ids_from_context.cache.get_immediate( - (room_id, context.prev_group), None - ) - if prev_res and isinstance(prev_res, set): - users_in_room = prev_res - member_event_ids = [ - e_id - for key, e_id in context.delta_ids.items() - if key[0] == EventTypes.Member - ] - for etype, state_key in context.delta_ids: - if etype == EventTypes.Member: - users_in_room.discard(state_key) - - # We check if we have any of the member event ids in the event cache - # before we ask the DB - - # We don't update the event cache hit ratio as it completely throws off - # the hit ratio counts. After all, we don't populate the cache if we - # miss it here - event_map = self._get_events_from_local_cache( - member_event_ids, update_metrics=False - ) + with Measure(self._clock, "get_joined_user_ids_from_state"): + users_in_room = set() + member_event_ids = [ + e_id for key, e_id in state.items() if key[0] == EventTypes.Member + ] - missing_member_event_ids = [] - for event_id in member_event_ids: - ev_entry = event_map.get(event_id) - if ev_entry and not ev_entry.event.rejected_reason: - if ev_entry.event.membership == Membership.JOIN: - users_in_room.add(ev_entry.event.state_key) - else: - missing_member_event_ids.append(event_id) + # We check if we have any of the member event ids in the event cache + # before we ask the DB - if missing_member_event_ids: - event_to_memberships = await self._get_user_ids_from_membership_event_ids( - missing_member_event_ids - ) - users_in_room.update( - user_id for user_id in event_to_memberships.values() if user_id + # We don't update the event cache hit ratio as it completely throws off + # the hit ratio counts. After all, we don't populate the cache if we + # miss it here + event_map = self._get_events_from_local_cache( + member_event_ids, update_metrics=False ) - if event is not None and event.type == EventTypes.Member: - if event.membership == Membership.JOIN: - if event.event_id in member_event_ids: - users_in_room.add(event.state_key) + missing_member_event_ids = [] + for event_id in member_event_ids: + ev_entry = event_map.get(event_id) + if ev_entry and not ev_entry.event.rejected_reason: + if ev_entry.event.membership == Membership.JOIN: + users_in_room.add(ev_entry.event.state_key) + else: + missing_member_event_ids.append(event_id) + + if missing_member_event_ids: + event_to_memberships = ( + await self._get_user_ids_from_membership_event_ids( + missing_member_event_ids + ) + ) + users_in_room.update( + user_id for user_id in event_to_memberships.values() if user_id + ) - return users_in_room + return users_in_room @cached( max_entries=10000, @@ -1205,7 +1159,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # The cache doesn't match the state group or prev state group, # so we calculate the result from first principles. 
joined_user_ids = await self.get_joined_user_ids_from_state( - room_id, state, state_entry + room_id, state ) cache.hosts_to_joined_users = {} -- cgit 1.4.1 From d1fb46fbc987fc0f2672780e373db83c7dacb6cf Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 12:19:40 +0100 Subject: Improve clarity on deprecation of TCP replication Borrows some text from https://github.com/matrix-org/synapse/pull/13647 for the changelog. --- CHANGES.md | 10 +++++++++- docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f502866f76..712d3b134a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,15 @@ No significant changes since 1.66.0rc2. Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse -v1.67.0. See the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) +v1.67.0. In particular, the TCP `replication` listener and the +`worker_replication_port` config option are deprecated. + +To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration) +and remove the TCP `replication` listener from config of the master and +`worker_replication_port` from worker config. Note that a HTTP listener with a +`replication` resource is still required. + +See the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 8ae018e628..5dee38d28d 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -431,7 +431,7 @@ Sub-options for each listener include: * `metrics`: (see the docs [here](../../metrics-howto.md)), - * `replication`: (see the docs [here](../../workers.md)). + * `replication`: (deprecated as of Synapse 1.18, see the docs [here](../../workers.md)). * `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path. -- cgit 1.4.1 From c01f21d31da83e3ad7844d40b293df941f304663 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 12:35:25 +0100 Subject: Tweak changelog wording --- CHANGES.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 712d3b134a..7e9862f61d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,11 +5,12 @@ No significant changes since 1.66.0rc2. Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse -v1.67.0. In particular, the TCP `replication` listener and the -`worker_replication_port` config option are deprecated. +v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners) +type (not to be confused with the `replication` resource on the `http` listener +type) and the `worker_replication_port` config option will be removed.
-To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration) -and remove the TCP `replication` listener from config of the master and +To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration), +then remove the TCP `replication` listener from config of the master and `worker_replication_port` from worker config. Note that a HTTP listener with a `replication` resource is still required. -- cgit 1.4.1 From 838d722eba6c3f9c9e3e6433b3771c994f75a2ec Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 12:40:14 +0100 Subject: Move notice from 1.66.0rc1 to 1.66.0 section in changelog --- CHANGES.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 7e9862f61d..535fd36a0e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,12 @@ Synapse 1.66.0 (2022-08-31) No significant changes since 1.66.0rc2. +This release removes the ability for homeservers to delegate email ownership +verification and password reset confirmation to identity servers. This removal +was originally planned for Synapse 1.64, but was later deferred until now. + +See the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. + Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners) @@ -30,12 +36,6 @@ Bugfixes Synapse 1.66.0rc1 (2022-08-23) ============================== -This release removes the ability for homeservers to delegate email ownership -verification and password reset confirmation to identity servers. This removal -was originally planned for Synapse 1.64, but was later deferred until now. - -See the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. - Features -------- -- cgit 1.4.1 From 6f80fe1e1bbb6cab3ce605b2023e0488e2d80d52 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Wed, 31 Aug 2022 12:51:57 +0100 Subject: Tweak changelog formatting --- CHANGES.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 535fd36a0e..0b10e90186 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,9 +5,8 @@ No significant changes since 1.66.0rc2. This release removes the ability for homeservers to delegate email ownership verification and password reset confirmation to identity servers. This removal -was originally planned for Synapse 1.64, but was later deferred until now. - -See the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. +was originally planned for Synapse 1.64, but was later deferred until now. See +the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners) type (not to be confused with the `replication` resource on the `http` listener type) and the `worker_replication_port` config option will be removed.
To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration), then remove the TCP `replication` listener from config of the master and `worker_replication_port` from worker config. Note that a HTTP listener with a -`replication` resource is still required. - -See the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) +`replication` resource is still required. See the +[worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details. -- cgit 1.4.1 From 84ddcd7bbfe4100101741a408a91f283a8f742c7 Mon Sep 17 00:00:00 2001 From: Jacek Kuśnierz Date: Wed, 31 Aug 2022 14:10:25 +0200 Subject: Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token` (#13241) Fixes #13206 Signed-off-by: Jacek Kusnierz jacek.kusnierz@tum.de --- changelog.d/13241.removal | 1 + synapse/handlers/identity.py | 142 +++++------------------------- synapse/handlers/room.py | 20 ++++- synapse/handlers/room_member.py | 6 +- synapse/rest/client/room.py | 20 +++-- synapse/rest/media/v1/media_repository.py | 1 - tests/rest/client/test_identity.py | 3 +- tests/rest/client/test_rooms.py | 18 ++++ tests/rest/client/test_shadow_banned.py | 7 +- 9 files changed, 81 insertions(+), 137 deletions(-) create mode 100644 changelog.d/13241.removal diff --git a/changelog.d/13241.removal b/changelog.d/13241.removal new file mode 100644 index 0000000000..60b0e7969c --- /dev/null +++ b/changelog.d/13241.removal @@ -0,0 +1 @@ +Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9571d461c8..93d09e9939 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -538,11 +538,7 @@ class IdentityHandler: raise SynapseError(400, "Error contacting the identity server") async def lookup_3pid( - self, - id_server: str, - medium: str, - address: str, - id_access_token: Optional[str] = None, + self, id_server: str, medium: str, address: str, id_access_token: str ) -> Optional[str]: """Looks up a 3pid in the passed identity server. @@ -557,60 +553,15 @@ class IdentityHandler: Returns: the matrix ID of the 3pid, or None if it is not recognized. """ - if id_access_token is not None: - try: - results = await self._lookup_3pid_v2( - id_server, id_access_token, medium, address - ) - return results - - except Exception as e: - # Catch HttpResponseExcept for a non-200 response code - # Check if this identity server does not know about v2 lookups - if isinstance(e, HttpResponseException) and e.code == 404: - # This is an old identity server that does not yet support v2 lookups - logger.warning( - "Attempted v2 lookup on v1 identity server %s. Falling " - "back to v1", - id_server, - ) - else: - logger.warning("Error when looking up hashing details: %s", e) - return None - - return await self._lookup_3pid_v1(id_server, medium, address) - - async def _lookup_3pid_v1( - self, id_server: str, medium: str, address: str - ) -> Optional[str]: - """Looks up a 3pid in the passed identity server using v1 lookup. - Args: - id_server: The server name (including port, if required) - of the identity server to use. - medium: The type of the third party identifier (e.g. "email"). - address: The third party identifier (e.g. "foo@example.com"). 
- - Returns: - the matrix ID of the 3pid, or None if it is not recognized. - """ try: - data = await self.blacklisting_http_client.get_json( - "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), - {"medium": medium, "address": address}, + results = await self._lookup_3pid_v2( + id_server, id_access_token, medium, address ) - - if "mxid" in data: - # note: we used to verify the identity server's signature here, but no longer - # require or validate it. See the following for context: - # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950 - return data["mxid"] - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except OSError as e: - logger.warning("Error from v1 identity server lookup: %s" % (e,)) - - return None + return results + except Exception as e: + logger.warning("Error when looking up hashing details: %s", e) + return None async def _lookup_3pid_v2( self, id_server: str, id_access_token: str, medium: str, address: str @@ -739,7 +690,7 @@ class IdentityHandler: room_type: Optional[str], inviter_display_name: str, inviter_avatar_url: str, - id_access_token: Optional[str] = None, + id_access_token: str, ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]: """ Asks an identity server for a third party invite. @@ -760,7 +711,7 @@ class IdentityHandler: inviter_display_name: The current display name of the inviter. inviter_avatar_url: The URL of the inviter's avatar. - id_access_token (str|None): The access token to authenticate to the identity + id_access_token (str): The access token to authenticate to the identity server with Returns: @@ -792,71 +743,24 @@ class IdentityHandler: invite_config["org.matrix.web_client_location"] = self._web_client_location # Add the identity service access token to the JSON body and use the v2 - # Identity Service endpoints if id_access_token is present + # Identity Service endpoints data = None - base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server) - if id_access_token: - key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( - id_server_scheme, - id_server, - ) + key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( + id_server_scheme, + id_server, + ) - # Attempt a v2 lookup - url = base_url + "/v2/store-invite" - try: - data = await self.blacklisting_http_client.post_json_get_json( - url, - invite_config, - {"Authorization": create_id_access_token_header(id_access_token)}, - ) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except HttpResponseException as e: - if e.code != 404: - logger.info("Failed to POST %s with JSON: %s", url, e) - raise e - - if data is None: - key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( - id_server_scheme, - id_server, + url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server) + try: + data = await self.blacklisting_http_client.post_json_get_json( + url, + invite_config, + {"Authorization": create_id_access_token_header(id_access_token)}, ) - url = base_url + "/api/v1/store-invite" - - try: - data = await self.blacklisting_http_client.post_json_get_json( - url, invite_config - ) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except HttpResponseException as e: - logger.warning( - "Error trying to call /store-invite on %s%s: %s", - id_server_scheme, - id_server, - e, - ) - - if data is None: - # Some identity servers may only support 
application/x-www-form-urlencoded - # types. This is especially true with old instances of Sydent, see - # https://github.com/matrix-org/sydent/pull/170 - try: - data = await self.blacklisting_http_client.post_urlencoded_get_json( - url, invite_config - ) - except HttpResponseException as e: - logger.warning( - "Error calling /store-invite on %s%s with fallback " - "encoding: %s", - id_server_scheme, - id_server, - e, - ) - raise e - - # TODO: Check for success + except RequestTimedOutError: + raise SynapseError(500, "Timed out contacting identity server") + token = data["token"] public_keys = data.get("public_keys", []) if "public_key" in data: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f64a8690a5..33e9a87002 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -19,6 +19,7 @@ import math import random import string from collections import OrderedDict +from http import HTTPStatus from typing import ( TYPE_CHECKING, Any, @@ -704,8 +705,8 @@ class RoomCreationHandler: was, requested, `room_alias`. Secondly, the stream_id of the last persisted event. Raises: - SynapseError if the room ID couldn't be stored, or something went - horribly wrong. + SynapseError if the room ID couldn't be stored, 3pid invitation config + validation failed, or something went horribly wrong. ResourceLimitError if server is blocked to some resource being exceeded """ @@ -731,6 +732,19 @@ class RoomCreationHandler: invite_3pid_list = config.get("invite_3pid", []) invite_list = config.get("invite", []) + # validate each entry for correctness + for invite_3pid in invite_3pid_list: + if not all( + key in invite_3pid + for key in ("medium", "address", "id_server", "id_access_token") + ): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "all of `medium`, `address`, `id_server` and `id_access_token` " + "are required when making a 3pid invite", + Codes.MISSING_PARAM, + ) + if not is_requester_admin: spam_check = await self.spam_checker.user_may_create_room(user_id) if spam_check != NOT_SPAM: @@ -978,7 +992,7 @@ class RoomCreationHandler: for invite_3pid in invite_3pid_list: id_server = invite_3pid["id_server"] - id_access_token = invite_3pid.get("id_access_token") # optional + id_access_token = invite_3pid["id_access_token"] address = invite_3pid["address"] medium = invite_3pid["medium"] # Note that do_3pid_invite can raise a ShadowBanError, but this was diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e726997d83..5d4adf5bfd 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1382,7 +1382,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): id_server: str, requester: Requester, txn_id: Optional[str], - id_access_token: Optional[str] = None, + id_access_token: str, prev_event_ids: Optional[List[str]] = None, depth: Optional[int] = None, ) -> Tuple[str, int]: @@ -1397,7 +1397,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): requester: The user making the request. txn_id: The transaction ID this is part of, or None if this is not part of a transaction. - id_access_token: The optional identity server access token. + id_access_token: Identity server access token. depth: Override the depth used to order the event in the DAG. 
prev_event_ids: The event IDs to use as the prev events Should normally be set to None, which will cause the depth to be calculated @@ -1494,7 +1494,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): room_id: str, user: UserID, txn_id: Optional[str], - id_access_token: Optional[str] = None, + id_access_token: str, prev_event_ids: Optional[List[str]] = None, depth: Optional[int] = None, ) -> Tuple[EventBase, int]: diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 0e2834008e..0bca012535 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -17,6 +17,7 @@ import logging import re from enum import Enum +from http import HTTPStatus from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple from urllib import parse as urlparse @@ -947,7 +948,16 @@ class RoomMembershipRestServlet(TransactionRestServlet): # cheekily send invalid bodies. content = {} - if membership_action == "invite" and self._has_3pid_invite_keys(content): + if membership_action == "invite" and all( + key in content for key in ("medium", "address") + ): + if not all(key in content for key in ("id_server", "id_access_token")): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "`id_server` and `id_access_token` are required when doing 3pid invite", + Codes.MISSING_PARAM, + ) + try: await self.room_member_handler.do_3pid_invite( room_id, @@ -957,7 +967,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): content["id_server"], requester, txn_id, - content.get("id_access_token"), + content["id_access_token"], ) except ShadowBanError: # Pretend the request succeeded. @@ -994,12 +1004,6 @@ class RoomMembershipRestServlet(TransactionRestServlet): return 200, return_value - def _has_3pid_invite_keys(self, content: JsonDict) -> bool: - for key in {"id_server", "medium", "address"}: - if key not in content: - return False - return True - def on_PUT( self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 7435fd9130..9dd3c8d4bb 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -64,7 +64,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) - # How often to run the background job to update the "recently accessed" # attribute of local and remote media. 
UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 # 1 minute diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index dc17c9d113..b0c8215744 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -25,7 +25,6 @@ from tests import unittest class IdentityTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -33,7 +32,6 @@ class IdentityTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["enable_3pid_lookup"] = False self.hs = self.setup_test_homeserver(config=config) @@ -54,6 +52,7 @@ class IdentityTestCase(unittest.HomeserverTestCase): "id_server": "testis", "medium": "email", "address": "test@example.com", + "id_access_token": tok, } request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") channel = self.make_request( diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index aa2f578441..c7eb88d33f 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -3461,3 +3461,21 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() + + def test_400_missing_param_without_id_access_token(self) -> None: + """ + Test that a 3pid invite request returns 400 M_MISSING_PARAM + if we do not include id_access_token. + """ + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "medium": "email", + "address": "teresa@example.com", + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 400) + self.assertEqual(channel.json_body["errcode"], "M_MISSING_PARAM") diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index c50f034b34..c807a37bc2 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -97,7 +97,12 @@ class RoomTestCase(_ShadowBannedBase): channel = self.make_request( "POST", "/rooms/%s/invite" % (room_id,), - {"id_server": "test", "medium": "email", "address": "test@test.test"}, + { + "id_server": "test", + "medium": "email", + "address": "test@test.test", + "id_access_token": "anytoken", + }, access_token=self.banned_access_token, ) self.assertEqual(200, channel.code, channel.result) -- cgit 1.4.1 From 0e99f07952edcb6396654e34da50ddeb0a211067 Mon Sep 17 00:00:00 2001 From: Šimon Brandner Date: Thu, 1 Sep 2022 14:31:54 +0200 Subject: Remove support for unstable private read receipts (#13653) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Šimon Brandner --- changelog.d/13653.removal | 1 + synapse/api/constants.py | 1 - synapse/config/experimental.py | 3 -- synapse/handlers/receipts.py | 29 +++---------- synapse/replication/tcp/client.py | 5 +-- synapse/rest/client/notifications.py | 1 - synapse/rest/client/read_marker.py | 2 - synapse/rest/client/receipts.py | 2 - synapse/rest/client/versions.py | 1 - .../storage/databases/main/event_push_actions.py | 2 - tests/handlers/test_receipts.py | 48 ++++++---------------- tests/rest/client/test_sync.py | 37 +++++------------ tests/storage/test_receipts.py | 34 ++++++--------- 13 files changed, 44 insertions(+), 122 deletions(-) create mode 100644 changelog.d/13653.removal diff --git 
a/changelog.d/13653.removal b/changelog.d/13653.removal new file mode 100644 index 0000000000..eb075d4517 --- /dev/null +++ b/changelog.d/13653.removal @@ -0,0 +1 @@ +Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c73aea622a..c178ddf070 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -258,7 +258,6 @@ class GuestAccess: class ReceiptTypes: READ: Final = "m.read" READ_PRIVATE: Final = "m.read.private" - UNSTABLE_READ_PRIVATE: Final = "org.matrix.msc2285.read.private" FULLY_READ: Final = "m.fully_read" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index c1ff417539..260db49cad 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -32,9 +32,6 @@ class ExperimentalConfig(Config): # MSC2716 (importing historical messages) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) - # MSC2285 (unstable private read receipts) - self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False) - # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index d4a866b346..d2bdb9c8be 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -163,10 +163,7 @@ class ReceiptsHandler: if not is_new: return - if self.federation_sender and receipt_type not in ( - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ): + if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE: await self.federation_sender.send_read_receipt(receipt) @@ -206,38 +203,24 @@ class ReceiptEventSource(EventSource[int, JsonDict]): for event_id, orig_event_content in room.get("content", {}).items(): event_content = orig_event_content # If there are private read receipts, additional logic is necessary. - if ( - ReceiptTypes.READ_PRIVATE in event_content - or ReceiptTypes.UNSTABLE_READ_PRIVATE in event_content - ): + if ReceiptTypes.READ_PRIVATE in event_content: # Make a copy without private read receipts to avoid leaking # other user's private read receipts.. event_content = { receipt_type: receipt_value for receipt_type, receipt_value in event_content.items() - if receipt_type - not in ( - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ) + if receipt_type != ReceiptTypes.READ_PRIVATE } # Copy the current user's private read receipt from the # original content, if it exists. - user_private_read_receipt = orig_event_content.get( - ReceiptTypes.READ_PRIVATE, {} - ).get(user_id, None) + user_private_read_receipt = orig_event_content[ + ReceiptTypes.READ_PRIVATE + ].get(user_id, None) if user_private_read_receipt: event_content[ReceiptTypes.READ_PRIVATE] = { user_id: user_private_read_receipt } - user_unstable_private_read_receipt = orig_event_content.get( - ReceiptTypes.UNSTABLE_READ_PRIVATE, {} - ).get(user_id, None) - if user_unstable_private_read_receipt: - event_content[ReceiptTypes.UNSTABLE_READ_PRIVATE] = { - user_id: user_unstable_private_read_receipt - } # Include the event if there is at least one non-private read # receipt or the current user has a private read receipt. 
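The hunk above implements the filtering rule in two steps: strip every other user's private read receipts from the event content, then re-add the requesting user's own. A standalone sketch of that logic (illustrative only, not the actual Synapse helper):

```python
from typing import Any, Dict

READ_PRIVATE = "m.read.private"


def filter_receipt_content(
    content: Dict[str, Dict[str, Any]], user_id: str
) -> Dict[str, Dict[str, Any]]:
    # Drop all m.read.private receipts, which may belong to other users...
    filtered = {
        receipt_type: receipts
        for receipt_type, receipts in content.items()
        if receipt_type != READ_PRIVATE
    }
    # ...then restore the requesting user's own private receipt, if any.
    own = content.get(READ_PRIVATE, {}).get(user_id)
    if own is not None:
        filtered[READ_PRIVATE] = {user_id: own}
    return filtered
```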
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 1ed7230e32..e4f2201c92 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -416,10 +416,7 @@ class FederationSenderHandler: if not self._is_mine_id(receipt.user_id): continue # Private read receipts never get sent over federation. - if receipt.receipt_type in ( - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, - ): + if receipt.receipt_type == ReceiptTypes.READ_PRIVATE: continue receipt_info = ReadReceipt( receipt.room_id, diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index a73322a6a4..61268e3af1 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -62,7 +62,6 @@ class NotificationsServlet(RestServlet): [ ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ], ) diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index aaad8b233f..5e53096539 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -45,8 +45,6 @@ class ReadMarkerRestServlet(RestServlet): ReceiptTypes.FULLY_READ, ReceiptTypes.READ_PRIVATE, } - if hs.config.experimental.msc2285_enabled: - self._known_receipt_types.add(ReceiptTypes.UNSTABLE_READ_PRIVATE) async def on_POST( self, request: SynapseRequest, room_id: str diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index c6108fc5eb..5b7fad7402 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -49,8 +49,6 @@ class ReceiptRestServlet(RestServlet): ReceiptTypes.READ_PRIVATE, ReceiptTypes.FULLY_READ, } - if hs.config.experimental.msc2285_enabled: - self._known_receipt_types.add(ReceiptTypes.UNSTABLE_READ_PRIVATE) async def on_POST( self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index c9a830cbac..c516cda95d 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -95,7 +95,6 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, # Supports receiving private read receipts as per MSC2285 "org.matrix.msc2285.stable": True, # TODO: Remove when MSC2285 becomes a part of the spec - "org.matrix.msc2285": self.config.experimental.msc2285_enabled, # Supports filtering of /publicRooms by room type as per MSC3827 "org.matrix.msc3827.stable": True, # Adds support for importing historical messages as per MSC2716 diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 9f410d69de..f4a07de2a3 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -274,7 +274,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas receipt_types=( ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ), ) @@ -468,7 +467,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ( ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ), ) diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index 5f70a2db79..b55238650c 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -15,8 +15,6 @@ from copy import deepcopy from 
typing import List -from parameterized import parameterized - from synapse.api.constants import EduTypes, ReceiptTypes from synapse.types import JsonDict @@ -27,16 +25,13 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.event_source = hs.get_event_sources().sources.receipt - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_filters_out_private_receipt(self, receipt_type: str) -> None: + def test_filters_out_private_receipt(self) -> None: self._test_filters_private( [ { "content": { "$1435641916114394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, } @@ -50,18 +45,13 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [], ) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_filters_out_private_receipt_and_ignores_rest( - self, receipt_type: str - ) -> None: + def test_filters_out_private_receipt_and_ignores_rest(self) -> None: self._test_filters_private( [ { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, }, @@ -94,18 +84,15 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) def test_filters_out_event_with_only_private_receipts_and_ignores_the_rest( - self, receipt_type: str + self, ) -> None: self._test_filters_private( [ { "content": { "$14356419edgd14394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, }, @@ -175,18 +162,15 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) def test_filters_out_receipt_event_with_only_private_receipt_and_ignores_rest( - self, receipt_type: str + self, ) -> None: self._test_filters_private( [ { "content": { "$14356419edgd14394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, }, @@ -262,16 +246,13 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_leaves_our_private_and_their_public(self, receipt_type: str) -> None: + def test_leaves_our_private_and_their_public(self) -> None: self._test_filters_private( [ { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@me:server.org": { "ts": 1436451550453, }, @@ -296,7 +277,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@me:server.org": { "ts": 1436451550453, }, @@ -319,16 +300,13 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_we_do_not_mutate(self, receipt_type: str) -> None: + def test_we_do_not_mutate(self) -> None: """Ensure the input values are not modified.""" events = [ { "content": { "$1435641916114394fHBLK:matrix.org": { - receipt_type: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, } diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index de0dec8539..0af643ecd9 100644 --- a/tests/rest/client/test_sync.py +++ 
b/tests/rest/client/test_sync.py @@ -391,7 +391,6 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() - config["experimental_features"] = {"msc2285_enabled": True} return self.setup_test_homeserver(config=config) @@ -413,17 +412,14 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Join the second user self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_private_read_receipts(self, receipt_type: str) -> None: + def test_private_read_receipts(self) -> None: # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) # Send a private read receipt to tell the server the first user's message was read channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", {}, access_token=self.tok2, ) @@ -432,10 +428,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Test that the first user can't see the other user's private read receipt self.assertIsNone(self._get_read_receipt()) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_public_receipt_can_override_private(self, receipt_type: str) -> None: + def test_public_receipt_can_override_private(self) -> None: """ Sending a public read receipt to the same event which has a private read receipt should cause that receipt to become public. @@ -446,7 +439,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Send a private read receipt channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", {}, access_token=self.tok2, ) @@ -465,10 +458,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Test that we did override the private read receipt self.assertNotEqual(self._get_read_receipt(), None) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_private_receipt_cannot_override_public(self, receipt_type: str) -> None: + def test_private_receipt_cannot_override_public(self) -> None: """ Sending a private read receipt to the same event which has a public read receipt should cause no change. @@ -489,7 +479,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Send a private read receipt channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", {}, access_token=self.tok2, ) @@ -554,7 +544,6 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): config = super().default_config() config["experimental_features"] = { "msc2654_enabled": True, - "msc2285_enabled": True, } return config @@ -601,10 +590,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): tok=self.tok, ) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_unread_counts(self, receipt_type: str) -> None: + def test_unread_counts(self) -> None: """Tests that /sync returns the right value for the unread count (MSC2654).""" # Check that our own messages don't increase the unread count. 
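The receipt tests above all drive the stable private read receipt type, `m.read.private` (`ReceiptTypes.READ_PRIVATE`). For orientation, a minimal sketch of the client-server call they exercise, assuming a hypothetical homeserver URL, access token, room and event; none of these values come from the patch:

```python
# Send a private read receipt (MSC2285, stable identifier "m.read.private").
# Homeserver, token, room and event IDs below are illustrative assumptions.
import requests

HOMESERVER = "https://matrix.example.org"
ACCESS_TOKEN = "syt_example_token"
ROOM_ID = "!room:example.org"
EVENT_ID = "$some_event_id"

resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/rooms/{ROOM_ID}/receipt/m.read.private/{EVENT_ID}",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={},
)
resp.raise_for_status()  # on success the server returns 200 with an empty JSON object
```

Unlike the unstable `org.matrix.msc2285.read.private` variant being deleted throughout this patch, the stable type no longer requires an `experimental_features` flag on the server, which is why the `msc2285_enabled` lines disappear from these test configs.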
@@ -638,7 +624,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): # Send a read receipt to tell the server we've read the latest event. channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", {}, access_token=self.tok, ) @@ -726,7 +712,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res2['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res2['event_id']}", {}, access_token=self.tok, ) @@ -738,7 +724,6 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): [ ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ] ) def test_read_receipts_only_go_down(self, receipt_type: str) -> None: @@ -752,7 +737,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): # Read last event channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res2['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res2['event_id']}", {}, access_token=self.tok, ) @@ -763,7 +748,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): # read receipt go up to an older event channel = self.make_request( "POST", - f"/rooms/{self.room_id}/receipt/{receipt_type}/{res1['event_id']}", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res1['event_id']}", {}, access_token=self.tok, ) diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py index 191c957fb5..c89bfff241 100644 --- a/tests/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from parameterized import parameterized from synapse.api.constants import ReceiptTypes from synapse.types import UserID, create_requester @@ -92,7 +91,6 @@ class ReceiptTestCase(HomeserverTestCase): [ ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ], ) ) @@ -104,7 +102,6 @@ class ReceiptTestCase(HomeserverTestCase): [ ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ], ) ) @@ -117,16 +114,12 @@ class ReceiptTestCase(HomeserverTestCase): [ ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE, - ReceiptTypes.UNSTABLE_READ_PRIVATE, ], ) ) self.assertEqual(res, None) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_get_receipts_for_user(self, receipt_type: str) -> None: + def test_get_receipts_for_user(self) -> None: # Send some events into the first room event1_1_id = self.create_and_send_event( self.room_id1, UserID.from_string(OTHER_USER_ID) @@ -144,14 +137,14 @@ class ReceiptTestCase(HomeserverTestCase): # Send private read receipt for the second event self.get_success( self.store.insert_receipt( - self.room_id1, receipt_type, OUR_USER_ID, [event1_2_id], {} + self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} ) ) # Test we get the latest event when we want both private and public receipts res = self.get_success( self.store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, receipt_type] + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] ) ) self.assertEqual(res, {self.room_id1: event1_2_id}) @@ -164,7 +157,7 @@ class ReceiptTestCase(HomeserverTestCase): # Test we get the latest event when we want only the public receipt res = self.get_success( - self.store.get_receipts_for_user(OUR_USER_ID, [receipt_type]) + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ_PRIVATE]) ) self.assertEqual(res, {self.room_id1: event1_2_id}) @@ -187,20 +180,17 @@ class ReceiptTestCase(HomeserverTestCase): # Test new room is reflected in what the method returns self.get_success( self.store.insert_receipt( - self.room_id2, receipt_type, OUR_USER_ID, [event2_1_id], {} + self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} ) ) res = self.get_success( self.store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ, receipt_type] + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] ) ) self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id}) - @parameterized.expand( - [ReceiptTypes.READ_PRIVATE, ReceiptTypes.UNSTABLE_READ_PRIVATE] - ) - def test_get_last_receipt_event_id_for_user(self, receipt_type: str) -> None: + def test_get_last_receipt_event_id_for_user(self) -> None: # Send some events into the first room event1_1_id = self.create_and_send_event( self.room_id1, UserID.from_string(OTHER_USER_ID) @@ -218,7 +208,7 @@ class ReceiptTestCase(HomeserverTestCase): # Send private read receipt for the second event self.get_success( self.store.insert_receipt( - self.room_id1, receipt_type, OUR_USER_ID, [event1_2_id], {} + self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} ) ) @@ -227,7 +217,7 @@ class ReceiptTestCase(HomeserverTestCase): self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, - [ReceiptTypes.READ, receipt_type], + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], ) ) self.assertEqual(res, event1_2_id) @@ -243,7 +233,7 @@ class ReceiptTestCase(HomeserverTestCase): # Test we get the latest event when we want only the private receipt res = 
self.get_success( self.store.get_last_receipt_event_id_for_user( - OUR_USER_ID, self.room_id1, [receipt_type] + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE] ) ) self.assertEqual(res, event1_2_id) @@ -269,14 +259,14 @@ class ReceiptTestCase(HomeserverTestCase): # Test new room is reflected in what the method returns self.get_success( self.store.insert_receipt( - self.room_id2, receipt_type, OUR_USER_ID, [event2_1_id], {} + self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} ) ) res = self.get_success( self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id2, - [ReceiptTypes.READ, receipt_type], + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], ) ) self.assertEqual(res, event2_1_id) -- cgit 1.4.1 From 18e40928010f990a8a9ce6db50a20b7add65bdbc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Sep 2022 14:46:47 +0200 Subject: Bump docker image to use stable poetry version (#13688) --- .github/workflows/tests.yml | 1 - changelog.d/13688.docker | 1 + docker/Dockerfile | 11 +---------- 3 files changed, 2 insertions(+), 11 deletions(-) create mode 100644 changelog.d/13688.docker diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3e6d718a7c..49a010d9d8 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -283,7 +283,6 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - uses: matrix-org/setup-python-poetry@v1 with: - python-version: ${{ matrix.python-version }} extras: "postgres" - run: .ci/scripts/test_export_data_command.sh diff --git a/changelog.d/13688.docker b/changelog.d/13688.docker new file mode 100644 index 0000000000..8935e2536f --- /dev/null +++ b/changelog.d/13688.docker @@ -0,0 +1 @@ +Update docker image to use a stable version of poetry. diff --git a/docker/Dockerfile b/docker/Dockerfile index fa58ae3acb..b87d263cff 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,17 +46,8 @@ RUN \ # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. -# We use a specific commit from poetry's master branch instead of our usual 1.1.14, -# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to -# https://github.com/python-poetry/poetry/pull/5156 and -# https://github.com/python-poetry/poetry/issues/5141 ; -# without it, we generate a requirements.txt with incorrect environment markers, -# which causes necessary packages to be omitted when we `pip install`. -# -# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also -# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export). 
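The Dockerfile change below swaps the pinned git commit for the stable `poetry==1.2.0` release. As a side note, a minimal sketch (not part of the image or the repo's CI, and purely an assumption about how one might verify such a pin) of failing fast when the resolved Poetry version drifts:

```python
# Fail fast if the installed Poetry does not match the pinned release.
# Requires Python 3.8+ for importlib.metadata; the pin value is an assumption.
from importlib.metadata import version

PINNED = "1.2.0"

installed = version("poetry")
if installed != PINNED:
    raise RuntimeError(f"expected poetry {PINNED}, got {installed}")
print(f"poetry {installed} matches the pin")
```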
RUN --mount=type=cache,target=/root/.cache/pip \ - pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5" + pip install --user "poetry==1.2.0" WORKDIR /synapse -- cgit 1.4.1 From e8130f219b8aebb00441e4979d34eb3b5d3d6f03 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 1 Sep 2022 13:54:02 +0100 Subject: Return keys for unwhitelisted servers from `/_matrix/key/v2/query` (#13683) --- changelog.d/13683.bugfix | 1 + synapse/rest/key/v2/remote_key_resource.py | 41 +++++++++++++++--------------- 2 files changed, 22 insertions(+), 20 deletions(-) create mode 100644 changelog.d/13683.bugfix diff --git a/changelog.d/13683.bugfix b/changelog.d/13683.bugfix new file mode 100644 index 0000000000..538534fec1 --- /dev/null +++ b/changelog.d/13683.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index f597157581..7f8ad29566 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -135,13 +135,6 @@ class RemoteKey(DirectServeJsonResource): store_queries = [] for server_name, key_ids in query.items(): - if ( - self.federation_domain_whitelist is not None - and server_name not in self.federation_domain_whitelist - ): - logger.debug("Federation denied with %s", server_name) - continue - if not key_ids: key_ids = (None,) for key_id in key_ids: @@ -153,21 +146,28 @@ class RemoteKey(DirectServeJsonResource): time_now_ms = self.clock.time_msec() - # Note that the value is unused. + # Map server_name->key_id->int. Note that the value of the int is unused. + # XXX: why don't we just use a set? cache_misses: Dict[str, Dict[str, int]] = {} for (server_name, key_id, _), key_results in cached.items(): results = [(result["ts_added_ms"], result) for result in key_results] - if not results and key_id is not None: - cache_misses.setdefault(server_name, {})[key_id] = 0 + if key_id is None: + # all keys were requested. Just return what we have without worrying + # about validity + for _, result in results: + # Cast to bytes since postgresql returns a memoryview. + json_results.add(bytes(result["key_json"])) continue - if key_id is not None: + miss = False + if not results: + miss = True + else: ts_added_ms, most_recent_result = max(results) ts_valid_until_ms = most_recent_result["ts_valid_until_ms"] req_key = query.get(server_name, {}).get(key_id, {}) req_valid_until = req_key.get("minimum_valid_until_ts") - miss = False if req_valid_until is not None: if ts_valid_until_ms < req_valid_until: logger.debug( @@ -211,19 +211,20 @@ class RemoteKey(DirectServeJsonResource): ts_valid_until_ms, time_now_ms, ) - - if miss: - cache_misses.setdefault(server_name, {})[key_id] = 0 # Cast to bytes since postgresql returns a memoryview. json_results.add(bytes(most_recent_result["key_json"])) - else: - for _, result in results: - # Cast to bytes since postgresql returns a memoryview.
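The new `XXX: why don't we just use a set?` comment above is a fair question. As a minimal sketch of the alternative it hints at (an editor's illustration, not what this patch does), the same bookkeeping with a set of `(server_name, key_id)` pairs:

```python
# Track key-cache misses as a set rather than a Dict[str, Dict[str, int]]
# whose int values are never read. Illustrative only.
from typing import Dict, Set, Tuple

cache_misses: Set[Tuple[str, str]] = set()

def record_miss(server_name: str, key_id: str) -> None:
    # Replaces: cache_misses.setdefault(server_name, {})[key_id] = 0
    cache_misses.add((server_name, key_id))

def as_nested_dict(misses: Set[Tuple[str, str]]) -> Dict[str, Dict[str, int]]:
    # Adapter back to the nested-dict shape the fetch path expects.
    out: Dict[str, Dict[str, int]] = {}
    for server_name, key_id in misses:
        out.setdefault(server_name, {})[key_id] = 0
    return out
```

Presumably the nested-dict shape survives because the downstream `fetcher.get_keys(...)` call already consumes it, so switching to a set would ripple into the fetchers.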
- json_results.add(bytes(result["key_json"])) + + if miss and query_remote_on_cache_miss: + # only bother attempting to fetch keys from servers on our whitelist + if ( + self.federation_domain_whitelist is None + or server_name in self.federation_domain_whitelist + ): + cache_misses.setdefault(server_name, {})[key_id] = 0 # If there is a cache miss, request the missing keys, then recurse (and # ensure the result is sent). - if cache_misses and query_remote_on_cache_miss: + if cache_misses: await yieldable_gather_results( lambda t: self.fetcher.get_keys(*t), ( -- cgit 1.4.1 From 2318603772f2e1595f9b39369246d5955a069381 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Sep 2022 14:54:52 +0200 Subject: Add some logging to help track down #13444 (#13679) --- changelog.d/13679.misc | 1 + synapse/federation/sender/__init__.py | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/13679.misc diff --git a/changelog.d/13679.misc b/changelog.d/13679.misc new file mode 100644 index 0000000000..a4fa94da9d --- /dev/null +++ b/changelog.d/13679.misc @@ -0,0 +1 @@ +Add some logging to help track down #13444. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 94a65ac65f..8bc60e3e3e 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -441,6 +441,19 @@ class FederationSender(AbstractFederationSender): destinations = await self._external_cache.get( "get_joined_hosts", str(sg) ) + if destinations is None: + # Add logging to help track down #13444 + logger.info( + "Unexpectedly did not have cached destinations for %s / %s", + sg, + event.event_id, + ) + else: + # Add logging to help track down #13444 + logger.info( + "Unexpectedly did not have cached prev group for %s", + event.event_id, + ) if destinations is None: try: -- cgit 1.4.1 From dcfb006f8a95366f9e388b313615039462fef0f9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Sep 2022 16:11:44 +0200 Subject: Use poetry 1.2.0 rather than prerelease for deb packages (#13695) --- debian/build_virtualenv | 2 +- debian/changelog | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/debian/build_virtualenv b/debian/build_virtualenv index f1ec609163..ed916ac97a 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -36,7 +36,7 @@ TEMP_VENV="$(mktemp -d)" python3 -m venv "$TEMP_VENV" source "$TEMP_VENV/bin/activate" pip install -U pip -pip install poetry==1.2.0b1 +pip install poetry==1.2.0 poetry export \ --extras all \ --extras test \ diff --git a/debian/changelog b/debian/changelog index 9398504a27..2b7b329b6b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.66.0ubuntu1) UNRELEASED; urgency=medium + + * Use stable poetry 1.2.0 version, rather than a prerelease. + + -- Erik Johnston Thu, 01 Sep 2022 13:48:31 +0100 + matrix-synapse-py3 (1.66.0) stable; urgency=medium * New Synapse release 1.66.0. -- cgit 1.4.1 From c913e440c065adde1e055eb0baef5df40615ae9b Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 1 Sep 2022 15:48:43 +0100 Subject: Add monthly active users documentation (#13617) * Add monthly active users documentation * changelog * Tidy up notes * more tidyup * Rewrite #1 * link back to mau docs * fix links * s/appservice|AS/application service * further review * a newline * Remove bit about shadow banned users. I think talking about them is confusing, and the current text doesn't imply they get any special treatment. 
* Update docs/usage/administration/monthly_active_users.md Co-authored-by: Patrick Cloke * Update docs/usage/administration/monthly_active_users.md Co-authored-by: Patrick Cloke Co-authored-by: Brendan Abolivier Co-authored-by: Patrick Cloke --- changelog.d/13617.doc | 1 + docs/SUMMARY.md | 1 + docs/usage/administration/monthly_active_users.md | 84 +++++++++++++++++++++++ docs/usage/configuration/config_documentation.md | 2 + 4 files changed, 88 insertions(+) create mode 100644 changelog.d/13617.doc create mode 100644 docs/usage/administration/monthly_active_users.md diff --git a/changelog.d/13617.doc b/changelog.d/13617.doc new file mode 100644 index 0000000000..5c7db7c3d7 --- /dev/null +++ b/changelog.d/13617.doc @@ -0,0 +1 @@ +Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 2d56b084e2..16720bceb5 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -69,6 +69,7 @@ - [Manhole](manhole.md) - [Monitoring](metrics-howto.md) - [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md) + - [Monthly Active Users](usage/administration/monthly_active_users.md) - [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md) - [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md) - [Database Maintenance Tools](usage/administration/database_maintenance_tools.md) diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md new file mode 100644 index 0000000000..d4e9037284 --- /dev/null +++ b/docs/usage/administration/monthly_active_users.md @@ -0,0 +1,84 @@ +# Monthly Active Users + +Synapse can be configured to record the number of monthly active users (also referred to as MAU) on a given homeserver. +For clarity's sake, MAU only tracks local users. + +Please note that the metrics recorded by the [Homeserver Usage Stats](../../usage/administration/monitoring/reporting_homeserver_usage_statistics.md) +are calculated differently. The `monthly_active_users` from the usage stats does not take into account any +of the rules below, and counts any users who have made a request to the homeserver in the last 30 days. + +See the [configuration manual](../../usage/configuration/config_documentation.md#limit_usage_by_mau) for details on how to configure MAU. + +## Calculating active users + +Individual user activity is measured in active days. If a user performs an action, the exact time of that action is then recorded. When +calculating the MAU figure, any users with a recorded action in the last 30 days are considered part of the cohort. Days are measured +as a rolling window from the current system time to 30 days ago. + +So for example, if Synapse were to calculate the active users on the 15th July at 13:25, it would include any activity from 15th June 13:25 onwards. + +A user is **never** considered active if they are either: + - Part of the trial day cohort (described below) + - Owned by an application service. + - Note: This **only** covers users that are part of an application service `namespaces.users` registration. The namespace + must also be marked as `exclusive`. + +Otherwise, any request to Synapse will mark the user as active. 
Please note that registration will not mark a user as active *unless* +they register with a 3pid that is included in the config field `mau_limits_reserved_threepids`. + +The Prometheus metric for MAU is refreshed every 5 minutes. + +Once an hour, Synapse checks to see if any users are inactive (with only activity timestamps older than 30 days). These users +are removed from the active users cohort. If they then become active, they are immediately restored to the cohort. + +It is important to note that **deactivated** users are not immediately removed from the pool of active users, but as these users won't +perform actions they will eventually be removed from the cohort. + +### Trial days + +If the config option `mau_trial_days` is set, a user must have been active this many days **after** registration to be counted as active. A user is in the +trial period if their registration timestamp (also known as the `creation_ts`) is fewer than `mau_trial_days` days old. + +As an example, if `mau_trial_days` is set to `3` and a user is active **after** 3 days (72 hours from registration time) then they will be counted as active. + +The `mau_appservice_trial_days` config further extends this rule by applying different durations depending on the `appservice_id` of the user. +Users registered by an application service will be recorded with an `appservice_id` matching the `id` key in the registration file for that service. + + +## Limiting usage of the homeserver when the maximum MAU is reached + +If both config options `limit_usage_by_mau` and `max_mau_value` are set, and the current MAU value exceeds the maximum value, the +homeserver will begin to block some actions. + +Individual users matching **any** of the below criteria never have their actions blocked: + - Considered part of the cohort of MAU users. + - Considered part of the trial period. + - Registered as a `support` user. + - Application service users if `track_appservice_user_ips` is NOT set. + +Please note that server admins are **not** exempt from blocking. + +The following actions are blocked when the MAU limit is exceeded: + - Logging in + - Sending events + - Creating rooms + - Syncing + +Registration is also blocked for all new signups *unless* the user is registering with a threepid included in the `mau_limits_reserved_threepids` +config value. + +When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMIT_EXCEEDED`. + +## Metrics + +Synapse records several different Prometheus metrics for MAU. + +`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users. + +`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value. + +`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used +to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`. + +`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have +registered accounts on the homeserver. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index bc9fc86bc8..88e8e1c66f 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -595,6 +595,8 @@ server owner wants to limit to the number of monthly active users.
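Taken together, the rules in the new `monthly_active_users.md` page reduce to a small predicate. A minimal sketch of the documented cohort check follows, with the caveat that the function and parameter names are illustrative rather than Synapse's own:

```python
# Monthly-active check as documented above: a rolling 30-day window plus an
# optional trial period. Names, shapes and defaults here are assumptions.
import time
from typing import Optional

DAY_MS = 24 * 60 * 60 * 1000
WINDOW_MS = 30 * DAY_MS

def is_monthly_active(
    last_action_ts_ms: Optional[int],
    creation_ts_ms: int,
    mau_trial_days: int = 0,
    now_ms: Optional[int] = None,
) -> bool:
    if now_ms is None:
        now_ms = int(time.time() * 1000)
    if last_action_ts_ms is None:
        # No recorded activity at all.
        return False
    if last_action_ts_ms < creation_ts_ms + mau_trial_days * DAY_MS:
        # Still within (or never active after) the trial period.
        return False
    # Rolling window: any qualifying action in the last 30 days keeps the
    # user in the cohort.
    return now_ms - last_action_ts_ms <= WINDOW_MS
```

The page's other exclusions (application-service users, `support` users) would sit in front of a check like this.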
When enabled and the limit is reached, the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. Defaults to false. If this is enabled, a value for `max_mau_value` must also be set. +See [Monthly Active Users](../administration/monthly_active_users.md) for details on how to configure MAU. + Example configuration: ```yaml limit_usage_by_mau: true -- cgit 1.4.1 From 9d2823ab70e69a90f4a54256bd32cd85eab130d9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Sep 2022 17:07:01 +0200 Subject: Cache `is_partial_state_room` (#13693) Fixes #13613. --- changelog.d/13693.misc | 1 + synapse/storage/databases/main/room.py | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13693.misc diff --git a/changelog.d/13693.misc b/changelog.d/13693.misc new file mode 100644 index 0000000000..31490191c9 --- /dev/null +++ b/changelog.d/13693.misc @@ -0,0 +1 @@ +Add cache to `is_partial_state_room`. diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 367424b4a8..bef66f1992 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1185,8 +1185,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): ) return False - @staticmethod - def _clear_partial_state_room_txn(txn: LoggingTransaction, room_id: str) -> None: + def _clear_partial_state_room_txn( + self, txn: LoggingTransaction, room_id: str + ) -> None: DatabasePool.simple_delete_txn( txn, table="partial_state_rooms_servers", @@ -1197,7 +1198,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): table="partial_state_rooms", keyvalues={"room_id": room_id}, ) + self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) + @cached() async def is_partial_state_room(self, room_id: str) -> bool: """Checks if this room has partial state. @@ -1771,9 +1774,8 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): servers, ) - @staticmethod def _store_partial_state_room_txn( - txn: LoggingTransaction, room_id: str, servers: Collection[str] + self, txn: LoggingTransaction, room_id: str, servers: Collection[str] ) -> None: DatabasePool.simple_insert_txn( txn, @@ -1788,6 +1790,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): keys=("room_id", "server_name"), values=((room_id, s) for s in servers), ) + self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) async def maybe_store_room_on_outlier_membership( self, room_id: str, room_version: RoomVersion -- cgit 1.4.1 From f48f4dd59eb2d98940ac2000de07f3324cd5b188 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 1 Sep 2022 15:27:06 +0000 Subject: Update the Grafana dashboard that is included with Synapse in the `contrib` directory. (#13697) * Add missing graph to contrib * Update with minor but plausible changes, including positioning changes * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/13697.misc | 1 + contrib/grafana/synapse.json | 147 +++++++++++++++++++++++++++++++++++++------ 2 files changed, 128 insertions(+), 20 deletions(-) create mode 100644 changelog.d/13697.misc diff --git a/changelog.d/13697.misc b/changelog.d/13697.misc new file mode 100644 index 0000000000..b9d2a60961 --- /dev/null +++ b/changelog.d/13697.misc @@ -0,0 +1 @@ +Update the Grafana dashboard that is included with Synapse in the `contrib` directory.
\ No newline at end of file diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 0f23fc17ea..248cd6d9ad 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -3244,6 +3244,104 @@ "yaxis": { "align": false } + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average number of hosts being rate limited across each worker type.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 225, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "avg by(job, rate_limiter_name) (synapse_rate_limit_sleep_affected_hosts{instance=\"$instance\", job=~\"$job\", index=~\"$index\"})", + "hide": false, + "legendFormat": "Slept by {{job}}:{{rate_limiter_name}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "avg by(job, rate_limiter_name) (synapse_rate_limit_reject_affected_hosts{instance=\"$instance\", job=~\"$job\", index=~\"$index\"})", + "legendFormat": "Rejected by {{job}}:{{rate_limiter_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Hosts being rate limited", + "type": "timeseries" } ], "targets": [ @@ -6404,7 +6502,7 @@ "h": 13, "w": 12, "x": 0, - "y": 10 + "y": 35 }, "hiddenSeries": false, "id": 12, @@ -6502,7 +6600,7 @@ "h": 13, "w": 12, "x": 12, - "y": 10 + "y": 35 }, "hiddenSeries": false, "id": 26, @@ -6601,7 +6699,7 @@ "h": 13, "w": 12, "x": 0, - "y": 23 + "y": 48 }, "hiddenSeries": false, "id": 13, @@ -6705,7 +6803,7 @@ "h": 13, "w": 12, "x": 12, - "y": 23 + "y": 48 }, "hiddenSeries": false, "id": 27, @@ -6803,7 +6901,7 @@ "h": 13, "w": 12, "x": 0, - "y": 36 + "y": 61 }, "hiddenSeries": false, "id": 28, @@ -6900,7 +6998,7 @@ "h": 13, "w": 12, "x": 12, - "y": 36 + "y": 61 }, "hiddenSeries": false, "id": 25, @@ -6935,7 +7033,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6960,11 +7058,13 @@ }, "yaxes": [ { - "format": "ms", + "$$hashKey": "object:180", + "format": "s", "logBase": 1, "show": true }, { + "$$hashKey": "object:181", "format": 
"short", "logBase": 1, "show": true @@ -6988,7 +7088,7 @@ "h": 15, "w": 12, "x": 0, - "y": 49 + "y": 74 }, "hiddenSeries": false, "id": 154, @@ -7009,7 +7109,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.0.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -7109,7 +7209,7 @@ "h": 10, "w": 12, "x": 0, - "y": 36 + "y": 69 }, "hiddenSeries": false, "id": 1, @@ -7211,7 +7311,7 @@ "h": 10, "w": 12, "x": 12, - "y": 36 + "y": 69 }, "hiddenSeries": false, "id": 8, @@ -7311,7 +7411,7 @@ "h": 10, "w": 12, "x": 0, - "y": 46 + "y": 79 }, "hiddenSeries": false, "id": 38, @@ -7407,7 +7507,7 @@ "h": 10, "w": 12, "x": 12, - "y": 46 + "y": 79 }, "hiddenSeries": false, "id": 39, @@ -7415,11 +7515,16 @@ "alignAsTable": true, "avg": false, "current": false, - "max": false, + "hideEmpty": false, + "hideZero": false, + "max": true, "min": false, + "rightSide": false, "show": true, + "sort": "max", + "sortDesc": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, @@ -7467,11 +7572,13 @@ }, "yaxes": [ { + "$$hashKey": "object:101", "format": "rps", "logBase": 1, "show": true }, { + "$$hashKey": "object:102", "format": "short", "logBase": 1, "show": true @@ -7501,7 +7608,7 @@ "h": 9, "w": 12, "x": 0, - "y": 56 + "y": 89 }, "hiddenSeries": false, "id": 65, @@ -11757,8 +11864,8 @@ ] }, "time": { - "from": "2022-07-22T04:08:13.716Z", - "to": "2022-07-22T18:44:27.863Z" + "from": "now-3h", + "to": "now" }, "timepicker": { "now": true, @@ -11789,6 +11896,6 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 124, + "version": 132, "weekStart": "" } \ No newline at end of file -- cgit 1.4.1 From 390b7ce946173b41a61f427ef25a9a1d0371ad0b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Sep 2022 12:52:03 -0400 Subject: Disable calculating unread counts unless the config flag is enabled. (#13694) This avoids doing work that will never be used (since the resulting unread counts will never be sent in a /sync response). The negative of doing this is that unread counts will be incorrect when the feature is initially enabled. --- changelog.d/13694.bugfix | 1 + synapse/config/experimental.py | 3 +++ synapse/push/bulk_push_rule_evaluator.py | 7 +++++- tests/storage/test_event_push_actions.py | 42 +++++++++++++++----------------- 4 files changed, 30 insertions(+), 23 deletions(-) create mode 100644 changelog.d/13694.bugfix diff --git a/changelog.d/13694.bugfix b/changelog.d/13694.bugfix new file mode 100644 index 0000000000..48b9bb5f0a --- /dev/null +++ b/changelog.d/13694.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 260db49cad..702b81e636 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -71,6 +71,9 @@ class ExperimentalConfig(Config): self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) # MSC2654: Unread counts + # + # Note that enabling this will result in an incorrect unread count for + # previously calculated push actions. 
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False) # MSC2815 (allow room moderators to view redacted event content) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index ccd512be54..d1caf8a0f7 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -262,7 +262,12 @@ class BulkPushRuleEvaluator: # This can happen due to out of band memberships return - count_as_unread = _should_count_as_unread(event, context) + # Disable counting as unread unless the experimental configuration is + # enabled, as it can cause additional (unwanted) rows to be added to the + # event_push_actions table. + count_as_unread = False + if self.hs.config.experimental.msc2654_enabled: + count_as_unread = _should_count_as_unread(event, context) rules_by_user = await self._get_rules_for_event(event) actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {} diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 62fd4aeb2f..fc43d7edd1 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -67,9 +67,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): last_event_id: str - def _assert_counts( - noitf_count: int, unread_count: int, highlight_count: int - ) -> None: + def _assert_counts(noitf_count: int, highlight_count: int) -> None: counts = self.get_success( self.store.db_pool.runInteraction( "get-unread-counts", @@ -82,7 +80,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): counts, NotifCounts( notify_count=noitf_count, - unread_count=unread_count, + unread_count=0, highlight_count=highlight_count, ), ) @@ -112,27 +110,27 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) ) - _assert_counts(0, 0, 0) + _assert_counts(0, 0) _create_event() - _assert_counts(1, 1, 0) + _assert_counts(1, 0) _rotate() - _assert_counts(1, 1, 0) + _assert_counts(1, 0) event_id = _create_event() - _assert_counts(2, 2, 0) + _assert_counts(2, 0) _rotate() - _assert_counts(2, 2, 0) + _assert_counts(2, 0) _create_event() _mark_read(event_id) - _assert_counts(1, 1, 0) + _assert_counts(1, 0) _mark_read(last_event_id) - _assert_counts(0, 0, 0) + _assert_counts(0, 0) _create_event() _rotate() - _assert_counts(1, 1, 0) + _assert_counts(1, 0) # Delete old event push actions, this should not affect the (summarised) count. # @@ -151,35 +149,35 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) ) self.assertEqual(result, []) - _assert_counts(1, 1, 0) + _assert_counts(1, 0) _mark_read(last_event_id) - _assert_counts(0, 0, 0) + _assert_counts(0, 0) event_id = _create_event(True) - _assert_counts(1, 1, 1) + _assert_counts(1, 1) _rotate() - _assert_counts(1, 1, 1) + _assert_counts(1, 1) # Check that adding another notification and rotating after highlight # works. _create_event() _rotate() - _assert_counts(2, 2, 1) + _assert_counts(2, 1) # Check that sending read receipts at different points results in the # right counts. 
_mark_read(event_id) - _assert_counts(1, 1, 0) + _assert_counts(1, 0) _mark_read(last_event_id) - _assert_counts(0, 0, 0) + _assert_counts(0, 0) _create_event(True) - _assert_counts(1, 1, 1) + _assert_counts(1, 1) _mark_read(last_event_id) - _assert_counts(0, 0, 0) + _assert_counts(0, 0) _rotate() - _assert_counts(0, 0, 0) + _assert_counts(0, 0) def test_find_first_stream_ordering_after_ts(self) -> None: def add_event(so: int, ts: int) -> None: -- cgit 1.4.1 From 48a5c47a9f7379f3b7e85c5a8dcbae346eb22310 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Sep 2022 14:57:47 -0400 Subject: Add a schema delta to drop unstable private read receipts. (#13692) Otherwise they'll be leaked due to the filtering code only respecting the stable identifiers for private read receipts. --- changelog.d/13692.removal | 1 + .../72/05remove_unstable_private_read_receipts.sql | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 changelog.d/13692.removal create mode 100644 synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql diff --git a/changelog.d/13692.removal b/changelog.d/13692.removal new file mode 100644 index 0000000000..eb075d4517 --- /dev/null +++ b/changelog.d/13692.removal @@ -0,0 +1 @@ +Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). diff --git a/synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql b/synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql new file mode 100644 index 0000000000..36b41049cd --- /dev/null +++ b/synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql @@ -0,0 +1,19 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Drop previously received private read receipts so they do not accidentally +-- get leaked to other users. +DELETE FROM receipts_linearized WHERE receipt_type = 'org.matrix.msc2285.read.private'; +DELETE FROM receipts_graph WHERE receipt_type = 'org.matrix.msc2285.read.private'; -- cgit 1.4.1 From 044900af6cc441d700d171098812786b4b312f59 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 1 Sep 2022 22:38:37 +0200 Subject: Fix two typos with colon in headlines (#13665) --- changelog.d/13665.doc | 1 + docs/usage/configuration/config_documentation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13665.doc diff --git a/changelog.d/13665.doc b/changelog.d/13665.doc new file mode 100644 index 0000000000..6ee6434662 --- /dev/null +++ b/changelog.d/13665.doc @@ -0,0 +1 @@ +Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. 
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 88e8e1c66f..396c560822 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2486,7 +2486,7 @@ report_stats_endpoint: https://example.com/report-usage-stats/push Config settings related to the client/server API --- -### `room_prejoin_state:` +### `room_prejoin_state` Controls for the state that is shared with users who receive an invite to a room. By default, the following state event types are shared with users who @@ -2626,7 +2626,7 @@ Example configuration: key_refresh_interval: 2d ``` --- -### `trusted_key_servers:` +### `trusted_key_servers` The trusted servers to download signing keys from. -- cgit 1.4.1 From 0fdb685c2b95aa4b727f291c02821ceee6d40dad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Sep 2022 14:12:47 +0200 Subject: Only run trial CI on all python versions on non-PRs (#13698) --- .github/workflows/tests.yml | 10 ++++++++++ changelog.d/13698.misc | 1 + 2 files changed, 11 insertions(+) create mode 100644 changelog.d/13698.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 49a010d9d8..720cb13907 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -82,6 +82,16 @@ jobs: python-version: ["3.7", "3.8", "3.9", "3.10"] database: ["sqlite"] extras: ["all"] + is_pr: + - ${{ startsWith(github.ref, 'refs/pull/') }} + + # If we're a PR then we only test min and max python. + exclude: + - is_pr: true + python-version: 3.8 + - is_pr: true + python-version: 3.9 + include: # Newest Python without optional deps - python-version: "3.10" diff --git a/changelog.d/13698.misc b/changelog.d/13698.misc new file mode 100644 index 0000000000..20b6b6d084 --- /dev/null +++ b/changelog.d/13698.misc @@ -0,0 +1 @@ +Only run trial CI on all python versions on non-PRs. -- cgit 1.4.1 From 4fee4a339d3bb1a90487a86ccbcca50a36b0ea74 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Sep 2022 15:20:03 +0200 Subject: Update lock file for Poetry v1.2.0 (#13689) --- changelog.d/13689.misc | 1 + docs/development/contributing_guide.md | 2 + docs/development/dependencies.md | 9 +- docs/upgrade.md | 7 ++ poetry.lock | 146 +++++++++++++++++++-------------- 5 files changed, 99 insertions(+), 66 deletions(-) create mode 100644 changelog.d/13689.misc diff --git a/changelog.d/13689.misc b/changelog.d/13689.misc new file mode 100644 index 0000000000..db6e48b150 --- /dev/null +++ b/changelog.d/13689.misc @@ -0,0 +1 @@ +Update poetry lock file for v1.2.0. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index ab320cbd78..4e1df51164 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -62,6 +62,8 @@ pipx install poetry but see poetry's [installation instructions](https://python-poetry.org/docs/#installation) for other installation methods. +Synapse requires Poetry version 1.2.0 or later. + Next, open a terminal and install dependencies as follows: ```sh diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index 236856a6b0..b356870f27 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -243,14 +243,11 @@ doesn't require poetry. (It's what we use in CI too). However, you could try ## Check the version of poetry with `poetry --version`. -At the time of writing, the 1.2 series is beta only. 
We have seen some examples -where the lockfiles generated by 1.2 prereleasese aren't interpreted correctly -by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical -[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain -[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775). +The minimum version of poetry supported by Synapse is 1.2. It can also be useful to check the version of `poetry-core` in use. If you've -installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`. +installed `poetry` with `pipx`, try `pipx runpip poetry list | grep +poetry-core`. ## Clear caches: `poetry cache clear --all pypi`. diff --git a/docs/upgrade.md b/docs/upgrade.md index 51719f8c73..422a3da664 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,13 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.67.0 + +## Minimum version of Poetry is now v1.2.0 + +The minimum supported version of poetry is now 1.2. This should only affect +those installing from a source checkout. + # Upgrading to v1.66.0 ## Delegation of email validation no longer supported diff --git a/poetry.lock b/poetry.lock index 651659ec98..35021390bf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -7,10 +7,10 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"] +tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"] [[package]] name = "authlib" @@ -39,7 +39,7 @@ attrs = ">=19.2.0" six = "*" [package.extras] -visualize = ["graphviz (>0.5.1)", "Twisted (>=16.1.1)"] +visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"] [[package]] name = "bcrypt" @@ -177,7 +177,7 @@ optional = false python-versions = "*" [package.extras] -test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] [[package]] name = "constantly" @@ -199,12 +199,12 @@ python-versions = ">=3.6" cffi = ">=1.12" [package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] -docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] sdist = ["setuptools_rust (>=0.11.4)"] ssh = ["bcrypt 
(>=3.1.5)"] -test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] +test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] [[package]] name = "defusedxml" @@ -226,7 +226,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" wrapt = ">=1.10,<2" [package.extras] -dev = ["tox", "bump2version (<1)", "sphinx (<2)", "importlib-metadata (<3)", "importlib-resources (<4)", "configparser (<5)", "sphinxcontrib-websupport (<2)", "zipp (<2)", "PyTest (<5)", "PyTest-Cov (<2.6)", "pytest", "pytest-cov"] +dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"] [[package]] name = "docutils" @@ -245,7 +245,7 @@ optional = true python-versions = ">=3.7" [package.extras] -dev = ["tox", "coverage", "lxml", "xmlschema (>=1.8.0)", "sphinx", "memory-profiler", "flake8", "mypy (==0.910)"] +dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"] [[package]] name = "flake8" @@ -274,7 +274,7 @@ attrs = ">=19.2.0" flake8 = ">=3.0.0" [package.extras] -dev = ["coverage", "black", "hypothesis", "hypothesmith"] +dev = ["black", "coverage", "hypothesis", "hypothesmith"] [[package]] name = "flake8-comprehensions" @@ -367,8 +367,8 @@ typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] [[package]] name = "importlib-resources" @@ -382,8 +382,8 @@ python-versions = ">=3.6" zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] [[package]] name = "incremental" @@ -405,9 +405,9 @@ optional = false python-versions = ">=3.6,<4.0" [package.extras] -pipfile_deprecated_finder = ["pipreqs", "requirementslib"] -requirements_deprecated_finder = ["pipreqs", "pip-api"] colors = ["colorama (>=0.4.3,<0.5.0)"] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] +requirements_deprecated_finder = ["pip-api", "pipreqs"] [[package]] name = "jaeger-client" @@ -424,7 +424,7 @@ thrift = "*" tornado = ">=4.3" [package.extras] -tests = ["mock", "pycurl", "pytest", "pytest-cov", "coverage", "pytest-timeout", "pytest-tornado", "pytest-benchmark", "pytest-localserver", "flake8", "flake8-quotes", 
"flake8-typing-imports", "codecov", "tchannel (==2.1.0)", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "mypy"] +tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"] [[package]] name = "jeepney" @@ -435,8 +435,8 @@ optional = false python-versions = ">=3.6" [package.extras] -trio = ["async-generator", "trio"] -test = ["async-timeout", "trio", "testpath", "pytest-asyncio", "pytest-trio", "pytest"] +test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"] +trio = ["async_generator", "trio"] [[package]] name = "jinja2" @@ -486,8 +486,8 @@ pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_ SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] [[package]] name = "ldap3" @@ -511,7 +511,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" [package.extras] cssselect = ["cssselect (>=0.7)"] html5 = ["html5lib"] -htmlsoup = ["beautifulsoup4"] +htmlsoup = ["BeautifulSoup4"] source = ["Cython (>=0.29.7)"] [[package]] @@ -535,8 +535,8 @@ attrs = "*" importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] -test = ["aiounittest", "twisted", "tox"] -dev = ["twine (==4.0.1)", "build (==0.8.0)", "isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "mypy (==0.910)", "aiounittest", "twisted", "tox"] +dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"] +test = ["aiounittest", "tox", "twisted"] [[package]] name = "matrix-synapse-ldap3" @@ -552,7 +552,7 @@ service-identity = "*" Twisted = ">=15.1.0" [package.extras] -dev = ["isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "types-setuptools", "mypy (==0.910)", "ldaptor", "tox", "matrix-synapse"] +dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"] [[package]] name = "mccabe" @@ -611,7 +611,7 @@ mypy = "0.950" "zope.schema" = "*" [package.extras] -test = ["pytest (>=4.6)", "pytest-cov", "lxml"] +test = ["lxml", "pytest (>=4.6)", "pytest-cov"] [[package]] name = "netaddr" @@ -630,7 +630,7 @@ optional = true python-versions = "*" [package.extras] -tests = ["doubles", "flake8", "flake8-quotes", "mock", "pytest", "pytest-cov", "pytest-mock", "sphinx", "sphinx-rtd-theme", "six (>=1.10.0,<2.0)", "gevent", "tornado"] +tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"] [[package]] name = "packaging" @@ -835,10 +835,10 @@ optional = false python-versions = ">=3.6" [package.extras] -tests = ["coverage[toml] 
(==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] -docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"] -dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"] crypto = ["cryptography (>=3.3.1)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymacaroons" @@ -872,8 +872,8 @@ python-versions = ">=3.6" cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] -tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"] +docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" @@ -925,11 +925,12 @@ pyOpenSSL = "*" python-dateutil = "*" pytz = "*" requests = ">=1.0.0" +setuptools = "*" six = "*" xmlschema = ">=1.2.1" [package.extras] -s2repoze = ["paste", "zope.interface", "repoze.who"] +s2repoze = ["paste", "repoze.who", "zope.interface"] [[package]] name = "python-dateutil" @@ -1054,11 +1055,11 @@ celery = ["celery (>=3)"] chalice = ["chalice (>=1.16.0)"] django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] -flask = ["flask (>=0.11)", "blinker (>=1.1)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)"] httpx = ["httpx (>=0.16.0)"] -pure_eval = ["pure-eval", "executing", "asttokens"] +pure_eval = ["asttokens", "executing", "pure-eval"] pyspark = ["pyspark (>=2.4.4)"] -quart = ["quart (>=0.16.1)", "blinker (>=1.1)"] +quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] rq = ["rq (>=0.6)"] sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] @@ -1080,11 +1081,24 @@ pyasn1-modules = "*" six = "*" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "pytest", "sphinx", "furo", "idna", "pyopenssl"] -docs = ["sphinx", "furo"] +dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"] +docs = ["furo", "sphinx"] idna = ["idna"] tests = ["coverage[toml] (>=5.0.2)", "pytest"] +[[package]] +name = "setuptools" +version = "65.3.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + [[package]] name = "signedjson" version = "1.1.4" @@ -1199,6 +1213,7 @@ click = "*" click-default-group = "*" incremental = "*" jinja2 = "*" +setuptools = "*" tomli = {version = "*", markers = "python_version >= 
\"3.6\""} [package.extras] @@ -1236,7 +1251,7 @@ requests = ">=2.1.0" Twisted = {version = ">=18.7.0", extras = ["tls"]} [package.extras] -dev = ["pep8", "pyflakes", "httpbin (==0.5.0)"] +dev = ["httpbin (==0.5.0)", "pep8", "pyflakes"] docs = ["sphinx (>=1.4.8)"] [[package]] @@ -1281,20 +1296,20 @@ typing-extensions = ">=3.6.5" "zope.interface" = ">=4.4.2" [package.extras] -all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] -conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"] -conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"] +all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] +conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] contextvars = ["contextvars (>=2.4,<3)"] -dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"] -dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"] +dev = ["coverage (>=6b1,<7)", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"] +dev_release = ["pydoctor (>=21.9.0,<21.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] -macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] -mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"] -osx_platform = 
["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] +macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"] +osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] -test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"] -tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"] -windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"] +test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"] +tls = ["idna (>=2.4)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)"] +windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] [[package]] name = "twisted-iocpsupport" @@ -1472,7 +1487,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" [package.extras] brotli = ["brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] @@ -1504,8 +1519,8 @@ elementpath = ">=2.5.0,<3.0.0" [package.extras] codegen = ["elementpath 
(>=2.5.0,<3.0.0)", "jinja2"] -dev = ["tox", "coverage", "lxml", "elementpath (>=2.5.0,<3.0.0)", "memory-profiler", "sphinx", "sphinx-rtd-theme", "jinja2", "flake8", "mypy", "lxml-stubs"] -docs = ["elementpath (>=2.5.0,<3.0.0)", "sphinx", "sphinx-rtd-theme", "jinja2"] +dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"] +docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"] [[package]] name = "zipp" @@ -1516,8 +1531,8 @@ optional = false python-versions = ">=3.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] +testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] [[package]] name = "zope.event" @@ -1527,8 +1542,11 @@ category = "dev" optional = false python-versions = "*" +[package.dependencies] +setuptools = "*" + [package.extras] -docs = ["sphinx"] +docs = ["Sphinx"] test = ["zope.testrunner"] [[package]] @@ -1539,8 +1557,11 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +[package.dependencies] +setuptools = "*" + [package.extras] -docs = ["sphinx", "repoze.sphinx.autointerface"] +docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] @@ -1553,11 +1574,12 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [package.dependencies] +setuptools = "*" "zope.event" = "*" "zope.interface" = ">=5.0.0" [package.extras] -docs = ["sphinx", "repoze.sphinx.autointerface"] +docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] @@ -2458,6 +2480,10 @@ service-identity = [ {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"}, ] +setuptools = [ + {file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"}, + {file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"}, +] signedjson = [ {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"}, {file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"}, -- cgit 1.4.1 From 36b184b78257fa4455293b7d7faa7a53a4261383 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 2 Sep 2022 11:29:51 -0500 Subject: Update docs to make enabling metrics more clear (#13678) It was really easy to miss the `enable_metrics: True` step with the previous language. 
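For reference, the end state these reworked docs describe — the `enable_metrics` flag plus a dedicated metrics listener — looks roughly like the following `homeserver.yaml` fragment. This is a minimal sketch: the port and bind address are illustrative, and (as the updated docs note) the "metrics" resource can instead be added to an existing HTTP listener.

```yaml
# Step 1: turn metrics collection on.
enable_metrics: true

# Step 2: expose /_synapse/metrics for Prometheus to scrape
# (here via a standalone metrics listener; port/address are illustrative).
listeners:
  - port: 9000
    type: metrics
    bind_addresses: ['127.0.0.1']
```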
--- changelog.d/13678.doc | 1 + docs/metrics-howto.md | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13678.doc diff --git a/changelog.d/13678.doc b/changelog.d/13678.doc new file mode 100644 index 0000000000..8b85dfe643 --- /dev/null +++ b/changelog.d/13678.doc @@ -0,0 +1 @@ +Update docs to make enabling metrics more clear. diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md index 4a77d5604c..279303a798 100644 --- a/docs/metrics-howto.md +++ b/docs/metrics-howto.md @@ -7,7 +7,13 @@ 1. Enable Synapse metrics: - There are two methods of enabling metrics in Synapse. + In `homeserver.yaml`, make sure `enable_metrics` is + set to `True`. + +1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to + collect data: + + There are two methods of enabling the metrics endpoint in Synapse. The first serves the metrics as a part of the usual web server and can be enabled by adding the \"metrics\" resource to the existing @@ -41,9 +47,6 @@ - '0.0.0.0' ``` - For both options, you will need to ensure that `enable_metrics` is - set to `True`. - 1. Restart Synapse. 1. Add a Prometheus target for Synapse. -- cgit 1.4.1 From 877bdfa889fa07d09f385df297a9282d51d50dae Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 2 Sep 2022 14:05:39 -0500 Subject: Clarify `(room_id, event_id)` global uniqueness (#13701) Summarized from @richvdh's reply at https://github.com/matrix-org/synapse/pull/13589#discussion_r961116999 --- changelog.d/13701.doc | 1 + docs/development/database_schema.md | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 changelog.d/13701.doc diff --git a/changelog.d/13701.doc b/changelog.d/13701.doc new file mode 100644 index 0000000000..b438e066d8 --- /dev/null +++ b/changelog.d/13701.doc @@ -0,0 +1 @@ +Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index d996a7caa2..e9b925ddd8 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -191,3 +191,27 @@ There are three separate aspects to this: flavour will be accepted by SQLite 3.22, but will give a column whose default value is the **string** `"FALSE"` - which, when cast back to a boolean in Python, evaluates to `True`. + + +## `event_id` global uniqueness + +In room versions `1` and `2` it's possible to end up with two events with the +same `event_id` (in the same or different rooms). After room version `3`, that +can only happen with a hash collision, which we basically hope will never +happen. + +There are several places in Synapse and even Matrix APIs like [`GET +/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid) +where we assume that event IDs are globally unique. + +But hash collisions are still possible, and by treating event IDs as room +scoped, we can reduce the possibility of a hash collision. When scoping +`event_id` in the database schema, it should be also accompanied by `room_id` +(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair +`(room_id, event_id)`. + +There has been a lot of debate on this in places like +https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and +[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which +has no resolution yet (as of 2022-09-01). 
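To make the scoping rule concrete, here is a minimal, self-contained sketch of the pattern the paragraph above recommends. The table name and values are hypothetical; the point is the composite primary key plus the paired lookup.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE example_event_data (
        room_id  TEXT NOT NULL,
        event_id TEXT NOT NULL,
        data     TEXT,
        -- event_id alone is not assumed globally unique; scope it by room.
        PRIMARY KEY (room_id, event_id)
    )
    """
)
conn.execute(
    "INSERT INTO example_event_data VALUES (?, ?, ?)",
    ("!room:example.com", "$event_a", "payload"),
)
# Lookups go through the (room_id, event_id) pair, never event_id alone.
row = conn.execute(
    "SELECT data FROM example_event_data WHERE room_id = ? AND event_id = ?",
    ("!room:example.com", "$event_a"),
).fetchone()
assert row == ("payload",)
```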
+ -- cgit 1.4.1 From ad7fc8e92f53217469342f4593907cd4ce13a930 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 3 Sep 2022 11:08:28 +0100 Subject: fix grammar --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 84e5310309..d116cd51fb 100644 --- a/README.rst +++ b/README.rst @@ -3,7 +3,7 @@ Synapse |support| |development| |documentation| |license| |pypi| |python| ========================================================================= Synapse is an open-source `Matrix `_ homeserver written and -maintained by the Matrix.org Foundation. We began rapid development began in 2014, +maintained by the Matrix.org Foundation. We began rapid development in 2014, reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues in earnest today. -- cgit 1.4.1 From 898fef2789c9b1a20ef53c7d588f536f51f0fe2f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 5 Sep 2022 12:26:43 +0200 Subject: Share some metrics between the Prometheus exporter and the phone home stats (#13671) --- changelog.d/13671.misc | 1 + synapse/app/_base.py | 5 ++- synapse/app/phone_stats_home.py | 13 +++++- synapse/metrics/common_usage_metrics.py | 79 +++++++++++++++++++++++++++++++++ synapse/server.py | 6 +++ tests/test_phone_home.py | 46 ++++++++++++++++++- 6 files changed, 146 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13671.misc create mode 100644 synapse/metrics/common_usage_metrics.py diff --git a/changelog.d/13671.misc b/changelog.d/13671.misc new file mode 100644 index 0000000000..f1c62b5b1e --- /dev/null +++ b/changelog.d/13671.misc @@ -0,0 +1 @@ +Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 4742435d3b..9a24bed0a0 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -511,9 +511,10 @@ async def start(hs: "HomeServer") -> None: setup_sentry(hs) setup_sdnotify(hs) - # If background tasks are running on the main process, start collecting the - # phone home stats. + # If background tasks are running on the main process or this is the worker in + # charge of them, start collecting the phone home stats and shared usage metrics. if hs.config.worker.run_background_tasks: + await hs.get_common_usage_metrics_manager().setup() start_phone_stats_home(hs) # We now freeze all allocated objects in the hopes that (almost) diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 40dbdace8e..51c8d15711 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -51,6 +51,16 @@ async def phone_stats_home( stats: JsonDict, stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process, ) -> None: + """Collect usage statistics and send them to the configured endpoint. + + Args: + hs: the HomeServer object to use for gathering usage data. + stats: the dict in which to store the statistics sent to the configured + endpoint. Mostly used in tests to figure out the data that is supposed to + be sent. + stats_process: statistics about resource usage of the process. + """ + logger.info("Gathering stats for reporting") now = int(hs.get_clock().time()) # Ensure the homeserver has started. 
@@ -83,6 +93,7 @@ async def phone_stats_home( # store = hs.get_datastores().main + common_metrics = await hs.get_common_usage_metrics_manager().get_metrics() stats["homeserver"] = hs.config.server.server_name stats["server_context"] = hs.config.server.server_context @@ -104,7 +115,7 @@ async def phone_stats_home( room_count = await store.get_room_count() stats["total_room_count"] = room_count - stats["daily_active_users"] = await store.count_daily_users() + stats["daily_active_users"] = common_metrics.daily_active_users stats["monthly_active_users"] = await store.count_monthly_users() daily_active_e2ee_rooms = await store.count_daily_active_e2ee_rooms() stats["daily_active_e2ee_rooms"] = daily_active_e2ee_rooms diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py new file mode 100644 index 0000000000..0a22ea3d92 --- /dev/null +++ b/synapse/metrics/common_usage_metrics.py @@ -0,0 +1,79 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +import attr + +from synapse.metrics.background_process_metrics import run_as_background_process + +if TYPE_CHECKING: + from synapse.server import HomeServer + +from prometheus_client import Gauge + +# Gauge to expose daily active users metrics +current_dau_gauge = Gauge( + "synapse_admin_daily_active_users", + "Current daily active users count", +) + + +@attr.s(auto_attribs=True) +class CommonUsageMetrics: + """Usage metrics shared between the phone home stats and the prometheus exporter.""" + + daily_active_users: int + + +class CommonUsageMetricsManager: + """Collects common usage metrics.""" + + def __init__(self, hs: "HomeServer") -> None: + self._store = hs.get_datastores().main + self._clock = hs.get_clock() + + async def get_metrics(self) -> CommonUsageMetrics: + """Get the CommonUsageMetrics object. If no collection has happened yet, do it + before returning the metrics. + + Returns: + The CommonUsageMetrics object to read common metrics from. + """ + return await self._collect() + + async def setup(self) -> None: + """Keep the gauges for common usage metrics up to date.""" + await self._update_gauges() + self._clock.looping_call( + run_as_background_process, + 5 * 60 * 1000, + desc="common_usage_metrics_update_gauges", + func=self._update_gauges, + ) + + async def _collect(self) -> CommonUsageMetrics: + """Collect the common metrics and either create the CommonUsageMetrics object to + use if it doesn't exist yet, or update it. 
+ """ + dau_count = await self._store.count_daily_users() + + return CommonUsageMetrics( + daily_active_users=dau_count, + ) + + async def _update_gauges(self) -> None: + """Update the Prometheus gauges.""" + metrics = await self._collect() + + current_dau_gauge.set(float(metrics.daily_active_users)) diff --git a/synapse/server.py b/synapse/server.py index c2e55bf0b1..5a99c0b344 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -105,6 +105,7 @@ from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler from synapse.handlers.user_directory import UserDirectoryHandler from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi from synapse.notifier import Notifier from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator @@ -829,3 +830,8 @@ class HomeServer(metaclass=abc.ABCMeta): self.config.ratelimiting.rc_message, self.config.ratelimiting.rc_admin_redaction, ) + + @cache_in_self + def get_common_usage_metrics_manager(self) -> CommonUsageMetricsManager: + """Usage metrics shared between phone home stats and the prometheus exporter.""" + return CommonUsageMetricsManager(self) diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index b01cae6e5d..cc1a98f1c4 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -15,8 +15,14 @@ import resource from unittest import mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.app.phone_stats_home import phone_stats_home +from synapse.rest import admin +from synapse.rest.client import login, sync +from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -47,5 +53,43 @@ class PhoneHomeStatsTestCase(HomeserverTestCase): stats: JsonDict = {} self.reactor.advance(1) # `old_resource` has type `Mock` instead of `struct_rusage` - self.get_success(phone_stats_home(self.hs, stats, past_stats)) # type: ignore[arg-type] + self.get_success( + phone_stats_home(self.hs, stats, past_stats) # type: ignore[arg-type] + ) self.assertApproximates(stats["cpu_average"], 100, tolerance=2.5) + + +class CommonMetricsTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.metrics_manager = hs.get_common_usage_metrics_manager() + self.get_success(self.metrics_manager.setup()) + + def test_dau(self) -> None: + """Tests that the daily active users count is correctly updated.""" + self._assert_metric_value("daily_active_users", 0) + + self.register_user("user", "password") + tok = self.login("user", "password") + self.make_request("GET", "/sync", access_token=tok) + + self.pump(1) + + self._assert_metric_value("daily_active_users", 1) + + def _assert_metric_value(self, metric_name: str, expected: int) -> None: + """Compare the given value to the current value of the common usage metric with + the given name. + + Args: + metric_name: The metric to look up. + expected: Expected value for this metric. 
+ """ + metrics = self.get_success(self.metrics_manager.get_metrics()) + value = getattr(metrics, metric_name) + self.assertEqual(value, expected) -- cgit 1.4.1 From 8cb9261598becb3ed790b4268ff713f5e1122a21 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 5 Sep 2022 12:13:13 +0100 Subject: Fix typechecking with latest `types-jsonschema` (#13712) --- changelog.d/13712.misc | 1 + synapse/api/filtering.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13712.misc diff --git a/changelog.d/13712.misc b/changelog.d/13712.misc new file mode 100644 index 0000000000..2c4f6b19f6 --- /dev/null +++ b/changelog.d/13712.misc @@ -0,0 +1 @@ +Fix typechecking with latest types-jsonschema. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index b007147519..102889ac49 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -140,13 +140,13 @@ USER_FILTER_SCHEMA = { @FormatChecker.cls_checks("matrix_room_id") -def matrix_room_id_validator(room_id_str: str) -> RoomID: - return RoomID.from_string(room_id_str) +def matrix_room_id_validator(room_id_str: str) -> bool: + return RoomID.is_valid(room_id_str) @FormatChecker.cls_checks("matrix_user_id") -def matrix_user_id_validator(user_id_str: str) -> UserID: - return UserID.from_string(user_id_str) +def matrix_user_id_validator(user_id_str: str) -> bool: + return UserID.is_valid(user_id_str) class Filtering: -- cgit 1.4.1 From c7b18d9d44c90acfd4ceaec1fa2f8275e03f14af Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 5 Sep 2022 11:16:59 +0000 Subject: Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. (#13483) --- changelog.d/13483.misc | 1 + scripts-dev/release.py | 146 +++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 143 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13483.misc diff --git a/changelog.d/13483.misc b/changelog.d/13483.misc new file mode 100644 index 0000000000..4fe6dbbea4 --- /dev/null +++ b/changelog.d/13483.misc @@ -0,0 +1 @@ +Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. \ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 46220c4dd3..6603bc593b 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -18,10 +18,12 @@ """ import glob +import json import os import re import subprocess import sys +import time import urllib.request from os import path from tempfile import TemporaryDirectory @@ -71,18 +73,21 @@ def cli() -> None: ./scripts-dev/release.py tag - # ... wait for assets to build ... + # wait for assets to build, either manually or with: + ./scripts-dev/release.py wait-for-actions ./scripts-dev/release.py publish ./scripts-dev/release.py upload - # Optional: generate some nice links for the announcement - ./scripts-dev/release.py merge-back + # Optional: generate some nice links for the announcement ./scripts-dev/release.py announce + Alternatively, `./scripts-dev/release.py full` will do all the above + as well as guiding you through the manual steps. + If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the `tag`/`publish` command, then a new draft release will be created/published. """ @@ -90,6 +95,10 @@ def cli() -> None: @cli.command() def prepare() -> None: + _prepare() + + +def _prepare() -> None: """Do the initial stages of creating a release, including creating release branch, updating changelog and pushing to GitHub. 
""" @@ -284,6 +293,10 @@ def prepare() -> None: @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"]) def tag(gh_token: Optional[str]) -> None: + _tag(gh_token) + + +def _tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" # Make sure we're in a git repo. @@ -374,6 +387,10 @@ def tag(gh_token: Optional[str]) -> None: @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) def publish(gh_token: str) -> None: + _publish(gh_token) + + +def _publish(gh_token: str) -> None: """Publish release on GitHub.""" # Make sure we're in a git repo. @@ -411,6 +428,10 @@ def publish(gh_token: str) -> None: @cli.command() def upload() -> None: + _upload() + + +def _upload() -> None: """Upload release to pypi.""" current_version = get_package_version() @@ -479,8 +500,75 @@ def _merge_into(repo: Repo, source: str, target: str) -> None: repo.remote().push() +@cli.command() +@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False) +def wait_for_actions(gh_token: Optional[str]) -> None: + _wait_for_actions(gh_token) + + +def _wait_for_actions(gh_token: Optional[str]) -> None: + # Find out the version and tag name. + current_version = get_package_version() + tag_name = f"v{current_version}" + + # Authentication is optional on this endpoint, + # but use a token if we have one to reduce the chance of being rate-limited. + url = f"https://api.github.com/repos/matrix-org/synapse/actions/runs?branch={tag_name}" + headers = {"Accept": "application/vnd.github+json"} + if gh_token is not None: + headers["authorization"] = f"token {gh_token}" + req = urllib.request.Request(url, headers=headers) + + time.sleep(10 * 60) + while True: + time.sleep(5 * 60) + response = urllib.request.urlopen(req) + resp = json.loads(response.read()) + + if len(resp["workflow_runs"]) == 0: + continue + + if all( + workflow["status"] != "in_progress" for workflow in resp["workflow_runs"] + ): + success = ( + workflow["status"] == "completed" for workflow in resp["workflow_runs"] + ) + if success: + _notify("Workflows successful. You can now continue the release.") + else: + _notify("Workflows failed.") + click.confirm("Continue anyway?", abort=True) + + break + + +def _notify(message: str) -> None: + # Send a bell character. Most terminals will play a sound or show a notification + # for this. + click.echo(f"\a{message}") + + # Try and run notify-send, but don't raise an Exception if this fails + # (This is best-effort) + # TODO Support other platforms? + subprocess.run( + [ + "notify-send", + "--app-name", + "Synapse Release Script", + "--expire-time", + "3600000", + message, + ] + ) + + @cli.command() def merge_back() -> None: + _merge_back() + + +def _merge_back() -> None: """Merge the release branch back into the appropriate branches. 
All branches will be automatically pulled from the remote and the results will be pushed to the remote.""" @@ -519,6 +607,10 @@ def merge_back() -> None: @cli.command() def announce() -> None: + _announce() + + +def _announce() -> None: """Generate markdown to announce the release.""" current_version = get_package_version() @@ -548,10 +640,56 @@ Announce the release in - #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic - #synapse:matrix.org (Synapse Admins), bumping the version in the topic - #synapse-dev:matrix.org -- #synapse-package-maintainers:matrix.org""" +- #synapse-package-maintainers:matrix.org + +Ask the designated people to do the blog and tweets.""" ) +@cli.command() +@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) +def full(gh_token: str) -> None: + click.echo("1. If this is a security release, read the security wiki page.") + click.echo("2. Check for any release blockers before proceeding.") + click.echo(" https://github.com/matrix-org/synapse/labels/X-Release-Blocker") + + click.confirm("Ready?", abort=True) + + click.echo("\n*** prepare ***") + _prepare() + + click.echo("Deploy to matrix.org and ensure that it hasn't fallen over.") + click.echo("Remember to silence the alerts to prevent alert spam.") + click.confirm("Deployed?", abort=True) + + click.echo("\n*** tag ***") + _tag(gh_token) + + click.echo("\n*** wait for actions ***") + _wait_for_actions(gh_token) + + click.echo("\n*** publish ***") + _publish(gh_token) + + click.echo("\n*** upload ***") + _upload() + + click.echo("\n*** merge back ***") + _merge_back() + + click.echo("\nUpdate the Debian repository") + click.confirm("Started updating Debian repository?", abort=True) + + click.echo("\nWait for all release methods to be ready.") + # Docker should be ready because it was done by the workflows earlier + # PyPI should be ready because we just ran upload(). + # TODO Automatically poll until the Debs have made it to packages.matrix.org + click.confirm("Debs ready?", abort=True) + + click.echo("\n*** announce ***") + _announce() + + def get_package_version() -> version.Version: version_string = subprocess.check_output(["poetry", "version", "--short"]).decode( "utf-8" -- cgit 1.4.1 From 8edf3f66d50adc79d432e0868e6d21ed3b136bd0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 5 Sep 2022 14:31:42 +0200 Subject: Reduce number of CI jobs run on PRs (#13713) * Reduce number of CI jobs run on PRs * Newsfile * Also limit sytest jobs * Fix typo * Fix up * Fixup --- .ci/scripts/calculate_jobs.py | 128 ++++++++++++++++++++++++++++++++++++++++++ .github/workflows/tests.yml | 97 ++++++++++---------------------- changelog.d/13713.misc | 1 + 3 files changed, 160 insertions(+), 66 deletions(-) create mode 100755 .ci/scripts/calculate_jobs.py create mode 100644 changelog.d/13713.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py new file mode 100755 index 0000000000..b1f604eeb0 --- /dev/null +++ b/.ci/scripts/calculate_jobs.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Calculate the trial jobs to run based on if we're in a PR or not. + +import json +import os + +IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/") + +# First calculate the various trial jobs. +# +# For each type of test we only run on Py3.7 on PRs + +trial_sqlite_tests = [ + { + "python-version": "3.7", + "database": "sqlite", + "extras": "all", + } +] + +if not IS_PR: + trial_sqlite_tests.extend( + { + "python-version": version, + "database": "sqlite", + "extras": "all", + } + for version in ("3.8", "3.9", "3.10") + ) + + +trial_postgres_tests = [ + { + "python-version": "3.7", + "database": "postgres", + "postgres-version": "10", + "extras": "all", + } +] + +if not IS_PR: + trial_postgres_tests.append( + { + "python-version": "3.10", + "database": "postgres", + "postgres-version": "14", + "extras": "all", + } + ) + +trial_no_extra_tests = [ + { + "python-version": "3.7", + "database": "sqlite", + "extras": "", + } +] + +print("::group::Calculated trial jobs") +print( + json.dumps( + trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4 + ) +) +print("::endgroup::") + +test_matrix = json.dumps( + trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests +) +print(f"::set-output name=trial_test_matrix::{test_matrix}") + + +# First calculate the various sytest jobs. +# +# For each type of test we only run on focal on PRs + + +sytest_tests = [ + { + "sytest-tag": "focal", + }, + { + "sytest-tag": "focal", + "postgres": "postgres", + }, + { + "sytest-tag": "focal", + "postgres": "multi-postgres", + "workers": "workers", + }, +] + +if not IS_PR: + sytest_tests.extend( + [ + { + "sytest-tag": "testing", + "postgres": "postgres", + }, + { + "sytest-tag": "buster", + "postgres": "multi-postgres", + "workers": "workers", + }, + ] + ) + + +print("::group::Calculated sytest jobs") +print(json.dumps(sytest_tests, indent=4)) +print("::endgroup::") + +test_matrix = json.dumps(sytest_tests) +print(f"::set-output name=sytest_test_matrix::{test_matrix}") diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 720cb13907..3ce4ffb036 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -73,63 +73,48 @@ jobs: steps: - run: "true" - trial: + calculate-test-jobs: if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail needs: linting-done runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - id: get-matrix + run: .ci/scripts/calculate_jobs.py + outputs: + trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }} + sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }} + + trial: + if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail + needs: calculate-test-jobs + runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] - database: ["sqlite"] - extras: ["all"] - is_pr: - - ${{ startsWith(github.ref, 'refs/pull/') }} - - # If we're a PR then we only test min and max python. 
- exclude: - - is_pr: true - python-version: 3.8 - - is_pr: true - python-version: 3.9 - - include: - # Newest Python without optional deps - - python-version: "3.10" - extras: "" - - # Oldest Python with PostgreSQL - - python-version: "3.7" - database: "postgres" - postgres-version: "10" - extras: "all" - - # Newest Python with newest PostgreSQL - - python-version: "3.10" - database: "postgres" - postgres-version: "14" - extras: "all" + job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }} steps: - uses: actions/checkout@v2 - run: sudo apt-get -qq install xmlsec1 - - name: Set up PostgreSQL ${{ matrix.postgres-version }} - if: ${{ matrix.postgres-version }} + - name: Set up PostgreSQL ${{ matrix.job.postgres-version }} + if: ${{ matrix.job.postgres-version }} run: | docker run -d -p 5432:5432 \ -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \ - postgres:${{ matrix.postgres-version }} + postgres:${{ matrix.job.postgres-version }} - uses: matrix-org/setup-python-poetry@v1 with: - python-version: ${{ matrix.python-version }} - extras: ${{ matrix.extras }} + python-version: ${{ matrix.job.python-version }} + extras: ${{ matrix.job.extras }} - name: Await PostgreSQL - if: ${{ matrix.postgres-version }} + if: ${{ matrix.job.postgres-version }} timeout-minutes: 2 run: until pg_isready -h localhost; do sleep 1; done - run: poetry run trial --jobs=2 tests env: - SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }} + SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }} SYNAPSE_POSTGRES_HOST: localhost SYNAPSE_POSTGRES_USER: postgres SYNAPSE_POSTGRES_PASSWORD: postgres @@ -208,45 +193,25 @@ jobs: sytest: if: ${{ !failure() && !cancelled() }} - needs: linting-done + needs: calculate-test-jobs runs-on: ubuntu-latest container: - image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }} + image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }} volumes: - ${{ github.workspace }}:/src env: SYTEST_BRANCH: ${{ github.head_ref }} - POSTGRES: ${{ matrix.postgres && 1}} - MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}} - WORKERS: ${{ matrix.workers && 1 }} - REDIS: ${{ matrix.redis && 1 }} - BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }} + POSTGRES: ${{ matrix.job.postgres && 1}} + MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}} + WORKERS: ${{ matrix.job.workers && 1 }} + REDIS: 1 + BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }} TOP: ${{ github.workspace }} strategy: fail-fast: false matrix: - include: - - sytest-tag: focal - - - sytest-tag: focal - postgres: postgres - - - sytest-tag: testing - postgres: postgres - - - sytest-tag: focal - postgres: multi-postgres - workers: workers - - - sytest-tag: buster - postgres: multi-postgres - workers: workers - - - sytest-tag: buster - postgres: postgres - workers: workers - redis: redis + job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }} steps: - uses: actions/checkout@v2 @@ -262,7 +227,7 @@ jobs: uses: actions/upload-artifact@v2 if: ${{ always() }} with: - name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }}) + name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }}) path: | /logs/results.tap /logs/**/*.log* diff --git a/changelog.d/13713.misc b/changelog.d/13713.misc new file mode 100644 index 0000000000..1044099542 --- /dev/null +++ b/changelog.d/13713.misc @@ -0,0 +1 @@ +Reduce number of CI checks we run for PRs. 
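A quick way to preview the matrices this script emits — for instance, to confirm the reduced PR set (Python 3.7-only trial jobs, focal-only SyTest jobs) against the full set — is to run it locally with a fake `GITHUB_REF`. A convenience sketch; the ref values are illustrative:

```python
import os
import subprocess

# Any ref starting with "refs/pull/" makes the script emit the reduced PR
# matrices; a branch ref yields the full set.
for ref in ("refs/pull/1234/merge", "refs/heads/develop"):
    env = dict(os.environ, GITHUB_REF=ref)
    subprocess.run(
        ["python", ".ci/scripts/calculate_jobs.py"], env=env, check=True
    )
```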
-- cgit 1.4.1 From 32fc3b7ba4702a0068a82bdd0595e2f426967d4d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 6 Sep 2022 03:50:02 -0400 Subject: Remove configuration options for direct TCP replication. (#13647) Removes the ability to configure legacy direct TCP replication. Workers now require Redis to run. --- .github/workflows/tests.yml | 1 - changelog.d/13647.removal | 1 + docs/upgrade.md | 15 ++++++ docs/usage/configuration/config_documentation.md | 2 - docs/workers.md | 22 ++------- synapse/app/homeserver.py | 11 ----- synapse/config/server.py | 16 +++++-- synapse/config/workers.py | 8 ++-- synapse/replication/tcp/handler.py | 58 +++++++++--------------- tests/app/test_openid_listener.py | 4 +- tests/test_server.py | 2 +- tests/utils.py | 1 - 12 files changed, 63 insertions(+), 78 deletions(-) create mode 100644 changelog.d/13647.removal diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3ce4ffb036..bc1de2893c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -204,7 +204,6 @@ jobs: POSTGRES: ${{ matrix.job.postgres && 1}} MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}} WORKERS: ${{ matrix.job.workers && 1 }} - REDIS: 1 BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }} TOP: ${{ github.workspace }} diff --git a/changelog.d/13647.removal b/changelog.d/13647.removal new file mode 100644 index 0000000000..0190a65dba --- /dev/null +++ b/changelog.d/13647.removal @@ -0,0 +1 @@ +Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. diff --git a/docs/upgrade.md b/docs/upgrade.md index 422a3da664..c6219d06e8 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -91,6 +91,21 @@ process, for example: # Upgrading to v1.67.0 +## Direct TCP replication is no longer supported: migrate to Redis + +Redis support was added in v1.13.0 with it becoming the recommended method in +v1.18.0. It replaced the old direct TCP connections (which was deprecated as of +v1.18.0) to the main process. With Redis, rather than all the workers connecting +to the main process, all the workers and the main process connect to Redis, +which relays replication commands between processes. This can give a significant +CPU saving on the main process and is a prerequisite for upcoming +performance improvements. + +To migrate to Redis add the [`redis` config](./workers.md#shared-configuration), +and remove the TCP `replication` listener from config of the master and +`worker_replication_port` from worker config. Note that a HTTP listener with a +`replication` resource is still required. + ## Minimum version of Poetry is now v1.2.0 The minimum supported version of poetry is now 1.2. This should only affect diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 396c560822..757957a1d5 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -431,8 +431,6 @@ Sub-options for each listener include: * `metrics`: (see the docs [here](../../metrics-howto.md)), - * `replication`: (deprecated as of Synapse 1.18, see the docs [here](../../workers.md)). - * `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path. * `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. 
Useful when Synapse is diff --git a/docs/workers.md b/docs/workers.md index 176bb1475e..40b1852313 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -32,13 +32,8 @@ stream between all configured Synapse processes. Additionally, processes may make HTTP requests to each other, primarily for operations which need to wait for a reply ─ such as sending an event. -Redis support was added in v1.13.0 with it becoming the recommended method in -v1.18.0. It replaced the old direct TCP connections (which is deprecated as of -v1.18.0) to the main process. With Redis, rather than all the workers connecting -to the main process, all the workers and the main process connect to Redis, -which relays replication commands between processes. This can give a significant -cpu saving on the main process and will be a prerequisite for upcoming -performance improvements. +All the workers and the main process connect to Redis, which relays replication +commands between processes. If Redis support is enabled Synapse will use it as a shared cache, as well as a pub/sub mechanism. @@ -330,7 +325,6 @@ effects of bursts of events from that bridge on events sent by normal users. Additionally, the writing of specific streams (such as events) can be moved off of the main process to a particular worker. -(This is only supported with Redis-based replication.) To enable this, the worker must have a HTTP replication listener configured, have a `worker_name` and be listed in the `instance_map` config. The same worker @@ -600,15 +594,9 @@ equivalent to `synapse.app.generic_worker`: ## Migration from old config -There are two main independent changes that have been made: introducing Redis -support and merging apps into `synapse.app.generic_worker`. Both these changes -are backwards compatible and so no changes to the config are required, however -server admins are encouraged to plan to migrate to Redis as the old style direct -TCP replication config is deprecated. - -To migrate to Redis add the `redis` config as above, and optionally remove the -TCP `replication` listener from master and `worker_replication_port` from worker -config. +A main change that has occurred is the merging of worker apps into +`synapse.app.generic_worker`. This change is backwards compatible and so no +changes to the config are required. To migrate apps to use `synapse.app.generic_worker` simply update the `worker_app` option in the worker configs, and where worker are started (e.g. 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e57a926032..883f2fd2ec 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -57,7 +57,6 @@ from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource from synapse.rest.admin import AdminRestResource from synapse.rest.health import HealthResource @@ -290,16 +289,6 @@ class SynapseHomeServer(HomeServer): manhole_settings=self.config.server.manhole_settings, manhole_globals={"hs": self}, ) - elif listener.type == "replication": - services = listen_tcp( - listener.bind_addresses, - listener.port, - ReplicationStreamProtocolFactory(self), - ) - for s in services: - self.get_reactor().addSystemEventTrigger( - "before", "shutdown", s.stopListening - ) elif listener.type == "metrics": if not self.config.metrics.enable_metrics: logger.warning( diff --git a/synapse/config/server.py b/synapse/config/server.py index 085fe22c51..c91df636d9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -36,6 +36,12 @@ from ._util import validate_config logger = logging.Logger(__name__) +DIRECT_TCP_ERROR = """ +Using direct TCP replication for workers is no longer supported. + +Please see https://matrix-org.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis +""" + # by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes # (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen # on IPv6 when '::' is set. @@ -165,7 +171,6 @@ KNOWN_LISTENER_TYPES = { "http", "metrics", "manhole", - "replication", } KNOWN_RESOURCES = { @@ -515,7 +520,9 @@ class ServerConfig(Config): ): raise ConfigError("allowed_avatar_mimetypes must be a list") - self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])] + self.listeners = [ + parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", [])) + ] # no_tls is not really supported any more, but let's grandfather it in # here. @@ -880,9 +887,12 @@ def read_gc_thresholds( ) -def parse_listener_def(listener: Any) -> ListenerConfig: +def parse_listener_def(num: int, listener: Any) -> ListenerConfig: """parse a listener config from the config file""" listener_type = listener["type"] + # Raise a helpful error if direct TCP replication is still configured. 
+ if listener_type == "replication": + raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type")) port = listener.get("port") if not isinstance(port, int): diff --git a/synapse/config/workers.py b/synapse/config/workers.py index f2716422b5..0fb725dd8f 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -27,7 +27,7 @@ from ._base import ( RoutableShardedWorkerHandlingConfig, ShardedWorkerHandlingConfig, ) -from .server import ListenerConfig, parse_listener_def +from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """ The send_federation config option must be disabled in the main @@ -128,7 +128,8 @@ class WorkerConfig(Config): self.worker_app = None self.worker_listeners = [ - parse_listener_def(x) for x in config.get("worker_listeners", []) + parse_listener_def(i, x) + for i, x in enumerate(config.get("worker_listeners", [])) ] self.worker_daemonize = bool(config.get("worker_daemonize")) self.worker_pid_file = config.get("worker_pid_file") @@ -142,7 +143,8 @@ class WorkerConfig(Config): self.worker_replication_host = config.get("worker_replication_host", None) # The port on the main synapse for TCP replication - self.worker_replication_port = config.get("worker_replication_port", None) + if "worker_replication_port" in config: + raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",)) # The port on the main synapse for HTTP replication endpoint self.worker_replication_http_port = config.get("worker_replication_http_port") diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index e1cbfa50eb..0f166d16aa 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -35,7 +35,6 @@ from twisted.internet.protocol import ReconnectingClientFactory from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.replication.tcp.client import DirectTcpReplicationClientFactory from synapse.replication.tcp.commands import ( ClearUserSyncsCommand, Command, @@ -332,46 +331,31 @@ class ReplicationCommandHandler: def start_replication(self, hs: "HomeServer") -> None: """Helper method to start replication.""" - if hs.config.redis.redis_enabled: - from synapse.replication.tcp.redis import ( - RedisDirectTcpReplicationClientFactory, - ) + from synapse.replication.tcp.redis import RedisDirectTcpReplicationClientFactory - # First let's ensure that we have a ReplicationStreamer started. - hs.get_replication_streamer() + # First let's ensure that we have a ReplicationStreamer started. + hs.get_replication_streamer() - # We need two connections to redis, one for the subscription stream and - # one to send commands to (as you can't send further redis commands to a - # connection after SUBSCRIBE is called). + # We need two connections to redis, one for the subscription stream and + # one to send commands to (as you can't send further redis commands to a + # connection after SUBSCRIBE is called). - # First create the connection for sending commands. - outbound_redis_connection = hs.get_outbound_redis_connection() + # First create the connection for sending commands. + outbound_redis_connection = hs.get_outbound_redis_connection() - # Now create the factory/connection for the subscription stream. 
- self._factory = RedisDirectTcpReplicationClientFactory( - hs, - outbound_redis_connection, - channel_names=self._channels_to_subscribe_to, - ) - hs.get_reactor().connectTCP( - hs.config.redis.redis_host, - hs.config.redis.redis_port, - self._factory, - timeout=30, - bindAddress=None, - ) - else: - client_name = hs.get_instance_name() - self._factory = DirectTcpReplicationClientFactory(hs, client_name, self) - host = hs.config.worker.worker_replication_host - port = hs.config.worker.worker_replication_port - hs.get_reactor().connectTCP( - host, - port, - self._factory, - timeout=30, - bindAddress=None, - ) + # Now create the factory/connection for the subscription stream. + self._factory = RedisDirectTcpReplicationClientFactory( + hs, + outbound_redis_connection, + channel_names=self._channels_to_subscribe_to, + ) + hs.get_reactor().connectTCP( + hs.config.redis.redis_host, + hs.config.redis.redis_port, + self._factory, + timeout=30, + bindAddress=None, + ) def get_streams(self) -> Dict[str, Stream]: """Get a map from stream name to all streams.""" diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 264e101082..c7dae58eb5 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -61,7 +61,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): } # Listen with the config - self.hs._listen_http(parse_listener_def(config)) + self.hs._listen_http(parse_listener_def(0, config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] @@ -109,7 +109,7 @@ class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): } # Listen with the config - self.hs._listener_http(self.hs.config, parse_listener_def(config)) + self.hs._listener_http(self.hs.config, parse_listener_def(0, config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] diff --git a/tests/test_server.py b/tests/test_server.py index 23975d59c3..7c66448245 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -228,7 +228,7 @@ class OptionsResourceTests(unittest.TestCase): site = SynapseSite( "test", "site_tag", - parse_listener_def({"type": "http", "port": 0}), + parse_listener_def(0, {"type": "http", "port": 0}), self.resource, "1.0", max_request_body_size=4096, diff --git a/tests/utils.py b/tests/utils.py index d2c6d1e852..65db437697 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -135,7 +135,6 @@ def default_config( "enable_registration_captcha": False, "macaroon_secret_key": "not even a little secret", "password_providers": [], - "worker_replication_url": "", "worker_app": None, "block_non_admin_invites": False, "federation_domain_whitelist": None, -- cgit 1.4.1 From cdf7fb737b12cc06cc5693806ee8fa21ade9723d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Sep 2022 09:01:26 +0100 Subject: 1.67.0rc1 --- CHANGES.md | 84 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/13241.removal | 1 - changelog.d/13483.misc | 1 - changelog.d/13509.bugfix | 1 - changelog.d/13540.misc | 1 - changelog.d/13546.bugfix | 1 - changelog.d/13569.removal | 1 - changelog.d/13573.misc | 1 - changelog.d/13575.misc | 1 - changelog.d/13583.bugfix | 1 - changelog.d/13585.bugfix | 1 - changelog.d/13586.misc | 1 - changelog.d/13588.misc | 1 - changelog.d/13591.misc | 1 - changelog.d/13592.misc | 1 - changelog.d/13597.misc | 1 - changelog.d/13600.misc | 1 - changelog.d/13602.doc | 1 - changelog.d/13603.misc | 1 - changelog.d/13605.misc | 1 - changelog.d/13606.misc | 1 - 
changelog.d/13608.misc | 1 - changelog.d/13614.feature | 1 - changelog.d/13615.feature | 1 - changelog.d/13616.bugfix | 1 - changelog.d/13617.doc | 1 - changelog.d/13627.misc | 1 - changelog.d/13632.bugfix | 1 - changelog.d/13634.feature | 1 - changelog.d/13639.misc | 1 - changelog.d/13640.doc | 1 - changelog.d/13645.doc | 1 - changelog.d/13647.removal | 1 - changelog.d/13653.removal | 1 - changelog.d/13656.doc | 1 - changelog.d/13657.bugfix | 1 - changelog.d/13658.bugfix | 1 - changelog.d/13660.bugfix | 1 - changelog.d/13662.misc | 1 - changelog.d/13665.doc | 1 - changelog.d/13671.misc | 1 - changelog.d/13678.doc | 1 - changelog.d/13679.misc | 1 - changelog.d/13683.bugfix | 1 - changelog.d/13688.docker | 1 - changelog.d/13689.misc | 1 - changelog.d/13692.removal | 1 - changelog.d/13693.misc | 1 - changelog.d/13694.bugfix | 1 - changelog.d/13697.misc | 1 - changelog.d/13698.misc | 1 - changelog.d/13701.doc | 1 - changelog.d/13712.misc | 1 - changelog.d/13713.misc | 1 - debian/changelog | 8 +++-- pyproject.toml | 2 +- 56 files changed, 91 insertions(+), 56 deletions(-) delete mode 100644 changelog.d/13241.removal delete mode 100644 changelog.d/13483.misc delete mode 100644 changelog.d/13509.bugfix delete mode 100644 changelog.d/13540.misc delete mode 100644 changelog.d/13546.bugfix delete mode 100644 changelog.d/13569.removal delete mode 100644 changelog.d/13573.misc delete mode 100644 changelog.d/13575.misc delete mode 100644 changelog.d/13583.bugfix delete mode 100644 changelog.d/13585.bugfix delete mode 100644 changelog.d/13586.misc delete mode 100644 changelog.d/13588.misc delete mode 100644 changelog.d/13591.misc delete mode 100644 changelog.d/13592.misc delete mode 100644 changelog.d/13597.misc delete mode 100644 changelog.d/13600.misc delete mode 100644 changelog.d/13602.doc delete mode 100644 changelog.d/13603.misc delete mode 100644 changelog.d/13605.misc delete mode 100644 changelog.d/13606.misc delete mode 100644 changelog.d/13608.misc delete mode 100644 changelog.d/13614.feature delete mode 100644 changelog.d/13615.feature delete mode 100644 changelog.d/13616.bugfix delete mode 100644 changelog.d/13617.doc delete mode 100644 changelog.d/13627.misc delete mode 100644 changelog.d/13632.bugfix delete mode 100644 changelog.d/13634.feature delete mode 100644 changelog.d/13639.misc delete mode 100644 changelog.d/13640.doc delete mode 100644 changelog.d/13645.doc delete mode 100644 changelog.d/13647.removal delete mode 100644 changelog.d/13653.removal delete mode 100644 changelog.d/13656.doc delete mode 100644 changelog.d/13657.bugfix delete mode 100644 changelog.d/13658.bugfix delete mode 100644 changelog.d/13660.bugfix delete mode 100644 changelog.d/13662.misc delete mode 100644 changelog.d/13665.doc delete mode 100644 changelog.d/13671.misc delete mode 100644 changelog.d/13678.doc delete mode 100644 changelog.d/13679.misc delete mode 100644 changelog.d/13683.bugfix delete mode 100644 changelog.d/13688.docker delete mode 100644 changelog.d/13689.misc delete mode 100644 changelog.d/13692.removal delete mode 100644 changelog.d/13693.misc delete mode 100644 changelog.d/13694.bugfix delete mode 100644 changelog.d/13697.misc delete mode 100644 changelog.d/13698.misc delete mode 100644 changelog.d/13701.doc delete mode 100644 changelog.d/13712.misc delete mode 100644 changelog.d/13713.misc diff --git a/CHANGES.md b/CHANGES.md index 0b10e90186..3bb00b6cea 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,87 @@ +Synapse 1.67.0rc1 (2022-09-06) +============================== + +Features 
+-------- + +- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614)) +- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615)) +- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509)) +- Fix bug that user cannot `/forget` rooms after the last member has left the room. ([\#13546](https://github.com/matrix-org/synapse/issues/13546)) +- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583)) +- Fix loading the current stream position behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585)) +- Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616)) +- Fix the running of MSC1763 retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632)) +- Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657)) +- Fix MSC3030 `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658)) +- Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660)) +- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683)) +- Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694)) + + +Updates to the Docker image +--------------------------- + +- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688)) + + +Improved Documentation +---------------------- + +- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602)) +- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617)) +- Improve documentation around user registration. 
([\#13640](https://github.com/matrix-org/synapse/issues/13640)) +- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645)) +- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656)) +- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665)) +- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678)) +- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701)) + + +Deprecations and Removals +------------------------- + +- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241)) +- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569)) +- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647)) +- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692)) + + +Internal Changes +---------------- + +- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483)) +- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540)) +- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600)) +- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575)) +- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586)) +- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588)) +- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591)) +- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592)) +- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597)) +- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603)) +- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. 
([\#13605](https://github.com/matrix-org/synapse/issues/13605)) +- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606)) +- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608)) +- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627)) +- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639)) +- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662)) +- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671)) +- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679)) +- Update poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689)) +- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693)) +- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697)) +- Only run trial CI on all python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698)) +- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712)) +- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713)) + + Synapse 1.66.0 (2022-08-31) =========================== diff --git a/changelog.d/13241.removal b/changelog.d/13241.removal deleted file mode 100644 index 60b0e7969c..0000000000 --- a/changelog.d/13241.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. \ No newline at end of file diff --git a/changelog.d/13483.misc b/changelog.d/13483.misc deleted file mode 100644 index 4fe6dbbea4..0000000000 --- a/changelog.d/13483.misc +++ /dev/null @@ -1 +0,0 @@ -Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. \ No newline at end of file diff --git a/changelog.d/13509.bugfix b/changelog.d/13509.bugfix deleted file mode 100644 index 6dcb9741d9..0000000000 --- a/changelog.d/13509.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. diff --git a/changelog.d/13540.misc b/changelog.d/13540.misc deleted file mode 100644 index 07ace50b12..0000000000 --- a/changelog.d/13540.misc +++ /dev/null @@ -1 +0,0 @@ -Add experimental configuration option to allow disabling legacy Prometheus metric names. 
\ No newline at end of file diff --git a/changelog.d/13546.bugfix b/changelog.d/13546.bugfix deleted file mode 100644 index 83bc3a61d2..0000000000 --- a/changelog.d/13546.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug that user cannot `/forget` rooms after the last member has left the room. \ No newline at end of file diff --git a/changelog.d/13569.removal b/changelog.d/13569.removal deleted file mode 100644 index af9d407671..0000000000 --- a/changelog.d/13569.removal +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13573.misc b/changelog.d/13573.misc deleted file mode 100644 index 1ce9c0c081..0000000000 --- a/changelog.d/13573.misc +++ /dev/null @@ -1 +0,0 @@ -Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13575.misc b/changelog.d/13575.misc deleted file mode 100644 index 3841472617..0000000000 --- a/changelog.d/13575.misc +++ /dev/null @@ -1 +0,0 @@ -Optimize how Synapse calculates domains to fetch from during backfill. diff --git a/changelog.d/13583.bugfix b/changelog.d/13583.bugfix deleted file mode 100644 index 1e4ce5904b..0000000000 --- a/changelog.d/13583.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. \ No newline at end of file diff --git a/changelog.d/13585.bugfix b/changelog.d/13585.bugfix deleted file mode 100644 index 664b986c59..0000000000 --- a/changelog.d/13585.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix loading the current stream position behind the actual position. diff --git a/changelog.d/13586.misc b/changelog.d/13586.misc deleted file mode 100644 index daa71470f6..0000000000 --- a/changelog.d/13586.misc +++ /dev/null @@ -1 +0,0 @@ -Comment about a better future where we can get the state diff between two events. diff --git a/changelog.d/13588.misc b/changelog.d/13588.misc deleted file mode 100644 index eca1416ceb..0000000000 --- a/changelog.d/13588.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. diff --git a/changelog.d/13591.misc b/changelog.d/13591.misc deleted file mode 100644 index 080e865e55..0000000000 --- a/changelog.d/13591.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of `@cachedList`. diff --git a/changelog.d/13592.misc b/changelog.d/13592.misc deleted file mode 100644 index 8f48d557e5..0000000000 --- a/changelog.d/13592.misc +++ /dev/null @@ -1 +0,0 @@ -Minor speed up of fetching large numbers of push rules. diff --git a/changelog.d/13597.misc b/changelog.d/13597.misc deleted file mode 100644 index eb5e971008..0000000000 --- a/changelog.d/13597.misc +++ /dev/null @@ -1 +0,0 @@ - Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13600.misc b/changelog.d/13600.misc deleted file mode 100644 index 1ce9c0c081..0000000000 --- a/changelog.d/13600.misc +++ /dev/null @@ -1 +0,0 @@ -Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13602.doc b/changelog.d/13602.doc deleted file mode 100644 index dbba082163..0000000000 --- a/changelog.d/13602.doc +++ /dev/null @@ -1 +0,0 @@ -Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. 
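An aside on the "cache user IDs instead of profiles" change noted above ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600)): the memory saving comes from keeping only member ID strings resident rather than a full profile object per member. The sketch below illustrates that trade-off with hypothetical class and method names; it is not Synapse's actual cache code.

```python
# Illustrative only: contrasts caching a profile object per member with
# caching bare user IDs, as in the "cache user IDs instead of profiles"
# change above. Names here are hypothetical, not Synapse's real API.
from dataclasses import dataclass
from typing import Dict, Set


@dataclass
class ProfileInfo:
    display_name: str
    avatar_url: str


class RoomMemberCache:
    def __init__(self) -> None:
        # Before: one ProfileInfo object per member, per cached room.
        self._profiles: Dict[str, Dict[str, ProfileInfo]] = {}
        # After: just the member IDs; profiles are fetched on demand.
        self._user_ids: Dict[str, Set[str]] = {}

    def cache_members(self, room_id: str, members: Dict[str, ProfileInfo]) -> None:
        # Storing only the keys avoids keeping display names and avatar
        # URLs resident for every member of every cached room.
        self._user_ids[room_id] = set(members)

    def get_user_ids(self, room_id: str) -> Set[str]:
        return self._user_ids.get(room_id, set())


if __name__ == "__main__":
    cache = RoomMemberCache()
    cache.cache_members(
        "!room:example.org",
        {"@alice:example.org": ProfileInfo("Alice", "mxc://example.org/a")},
    )
    print(cache.get_user_ids("!room:example.org"))
```

With many cached rooms, dropping the per-member profile objects removes two strings per user from every room's cache entry, which is the memory reduction the changelog entry describes.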
diff --git a/changelog.d/13603.misc b/changelog.d/13603.misc deleted file mode 100644 index d08eb6cc0a..0000000000 --- a/changelog.d/13603.misc +++ /dev/null @@ -1 +0,0 @@ -Rename `event_map` to `unpersisted_events` when computing the auth differences. diff --git a/changelog.d/13605.misc b/changelog.d/13605.misc deleted file mode 100644 index 88d518383b..0000000000 --- a/changelog.d/13605.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. diff --git a/changelog.d/13606.misc b/changelog.d/13606.misc deleted file mode 100644 index 58a4467798..0000000000 --- a/changelog.d/13606.misc +++ /dev/null @@ -1 +0,0 @@ -Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. diff --git a/changelog.d/13608.misc b/changelog.d/13608.misc deleted file mode 100644 index 19bcc45e33..0000000000 --- a/changelog.d/13608.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. diff --git a/changelog.d/13614.feature b/changelog.d/13614.feature deleted file mode 100644 index fa177ead09..0000000000 --- a/changelog.d/13614.feature +++ /dev/null @@ -1 +0,0 @@ -Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. diff --git a/changelog.d/13615.feature b/changelog.d/13615.feature deleted file mode 100644 index c2c568f1eb..0000000000 --- a/changelog.d/13615.feature +++ /dev/null @@ -1 +0,0 @@ -Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. diff --git a/changelog.d/13616.bugfix b/changelog.d/13616.bugfix deleted file mode 100644 index f2c48d1d8d..0000000000 --- a/changelog.d/13616.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. diff --git a/changelog.d/13617.doc b/changelog.d/13617.doc deleted file mode 100644 index 5c7db7c3d7..0000000000 --- a/changelog.d/13617.doc +++ /dev/null @@ -1 +0,0 @@ -Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. diff --git a/changelog.d/13627.misc b/changelog.d/13627.misc deleted file mode 100644 index 1a15709aff..0000000000 --- a/changelog.d/13627.misc +++ /dev/null @@ -1 +0,0 @@ -Drop unused column `application_services_state.last_txn`. diff --git a/changelog.d/13632.bugfix b/changelog.d/13632.bugfix deleted file mode 100644 index e4b7b403cd..0000000000 --- a/changelog.d/13632.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the running of MSC1763 retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. diff --git a/changelog.d/13634.feature b/changelog.d/13634.feature deleted file mode 100644 index 0a8827205d..0000000000 --- a/changelog.d/13634.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending messages in rooms with thousands of local users. diff --git a/changelog.d/13639.misc b/changelog.d/13639.misc deleted file mode 100644 index de4e4d1206..0000000000 --- a/changelog.d/13639.misc +++ /dev/null @@ -1 +0,0 @@ -Improve readability of Complement CI logs by printing failure results last. 
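The `registration_shared_secret_path` feature ([\#13614](https://github.com/matrix-org/synapse/issues/13614)) above follows the common "secret in a file" pattern: the configuration names a file and the secret is read once at startup, so the secret itself never appears in the main YAML. A minimal sketch of that pattern follows; the helper name is hypothetical, and the rule that the inline and file-based options are mutually exclusive is an assumption for illustration.

```python
# Sketch of the "secret in a file" pattern behind
# `registration_shared_secret_path`. `load_registration_shared_secret`
# is a hypothetical helper, not Synapse's actual implementation.
from pathlib import Path
from typing import Optional


def load_registration_shared_secret(
    inline_secret: Optional[str], secret_path: Optional[str]
) -> Optional[str]:
    if inline_secret is not None and secret_path is not None:
        # Assumption: configuring both would be ambiguous, so reject it.
        raise ValueError(
            "registration_shared_secret and registration_shared_secret_path "
            "are mutually exclusive"
        )
    if secret_path is not None:
        # Strip the trailing newline most editors append to the file;
        # otherwise it would silently become part of the secret.
        return Path(secret_path).read_text().strip()
    return inline_secret


if __name__ == "__main__":
    print(load_registration_shared_secret("not even a little secret", None))
```

Reading with `.strip()` matters in practice: a secret file almost always ends with a newline, and comparing against the raw file contents would otherwise fail.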
diff --git a/changelog.d/13640.doc b/changelog.d/13640.doc deleted file mode 100644 index fa049371cf..0000000000 --- a/changelog.d/13640.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation around user registration. diff --git a/changelog.d/13645.doc b/changelog.d/13645.doc deleted file mode 100644 index 04c302ec2c..0000000000 --- a/changelog.d/13645.doc +++ /dev/null @@ -1 +0,0 @@ -Remove documentation of legacy `frontend_proxy` worker app. diff --git a/changelog.d/13647.removal b/changelog.d/13647.removal deleted file mode 100644 index 0190a65dba..0000000000 --- a/changelog.d/13647.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. diff --git a/changelog.d/13653.removal b/changelog.d/13653.removal deleted file mode 100644 index eb075d4517..0000000000 --- a/changelog.d/13653.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). diff --git a/changelog.d/13656.doc b/changelog.d/13656.doc deleted file mode 100644 index 61013a0daf..0000000000 --- a/changelog.d/13656.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify documentation that HTTP replication traffic can be protected with a shared secret. diff --git a/changelog.d/13657.bugfix b/changelog.d/13657.bugfix deleted file mode 100644 index d314d9c52f..0000000000 --- a/changelog.d/13657.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. diff --git a/changelog.d/13658.bugfix b/changelog.d/13658.bugfix deleted file mode 100644 index 8740f066bb..0000000000 --- a/changelog.d/13658.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix MSC3030 `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. diff --git a/changelog.d/13660.bugfix b/changelog.d/13660.bugfix deleted file mode 100644 index 43859a4d65..0000000000 --- a/changelog.d/13660.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. diff --git a/changelog.d/13662.misc b/changelog.d/13662.misc deleted file mode 100644 index 3dea4a1c2c..0000000000 --- a/changelog.d/13662.misc +++ /dev/null @@ -1 +0,0 @@ -Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. \ No newline at end of file diff --git a/changelog.d/13665.doc b/changelog.d/13665.doc deleted file mode 100644 index 6ee6434662..0000000000 --- a/changelog.d/13665.doc +++ /dev/null @@ -1 +0,0 @@ -Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. diff --git a/changelog.d/13671.misc b/changelog.d/13671.misc deleted file mode 100644 index f1c62b5b1e..0000000000 --- a/changelog.d/13671.misc +++ /dev/null @@ -1 +0,0 @@ -Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. diff --git a/changelog.d/13678.doc b/changelog.d/13678.doc deleted file mode 100644 index 8b85dfe643..0000000000 --- a/changelog.d/13678.doc +++ /dev/null @@ -1 +0,0 @@ -Update docs to make enabling metrics more clear. 
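On generalising the `@cancellable` annotation ([\#13662](https://github.com/matrix-org/synapse/issues/13662)): annotations of this kind are typically pure markers, where the decorator wraps nothing and only tags the callable, and the request machinery checks the tag before allowing a client disconnect to cancel the work. The sketch below shows that general shape with illustrative names; it is not Synapse's exact code.

```python
# Sketch of a marker-style `@cancellable` decorator: it attaches a flag
# that calling code can inspect before deciding whether a disconnected
# client may cancel the work. Illustrative shape only.
from typing import Any, Callable, TypeVar

F = TypeVar("F", bound=Callable[..., Any])


def cancellable(func: F) -> F:
    # The decorator does no wrapping; it only marks the callable.
    func.cancellable = True  # type: ignore[attr-defined]
    return func


@cancellable
def fetch_missing_events() -> str:
    return "events"


def run(handler: Callable[[], str]) -> str:
    if getattr(handler, "cancellable", False):
        # A real server would wrap the call so that a client disconnect
        # cancels it; here we only note that cancellation is permitted.
        print("handler may be cancelled on disconnect")
    return handler()


if __name__ == "__main__":
    run(fetch_missing_events)
```

Because the decorator returns the function unchanged, it can mark free functions as easily as servlet methods, which is the kind of generalisation the entry above describes.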
diff --git a/changelog.d/13679.misc b/changelog.d/13679.misc deleted file mode 100644 index a4fa94da9d..0000000000 --- a/changelog.d/13679.misc +++ /dev/null @@ -1 +0,0 @@ -Add some logging to help track down #13444. diff --git a/changelog.d/13683.bugfix b/changelog.d/13683.bugfix deleted file mode 100644 index 538534fec1..0000000000 --- a/changelog.d/13683.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. diff --git a/changelog.d/13688.docker b/changelog.d/13688.docker deleted file mode 100644 index 8935e2536f..0000000000 --- a/changelog.d/13688.docker +++ /dev/null @@ -1 +0,0 @@ -Update docker image to use a stable version of poetry. diff --git a/changelog.d/13689.misc b/changelog.d/13689.misc deleted file mode 100644 index db6e48b150..0000000000 --- a/changelog.d/13689.misc +++ /dev/null @@ -1 +0,0 @@ -Update poetry lock file for v1.2.0. diff --git a/changelog.d/13692.removal b/changelog.d/13692.removal deleted file mode 100644 index eb075d4517..0000000000 --- a/changelog.d/13692.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). diff --git a/changelog.d/13693.misc b/changelog.d/13693.misc deleted file mode 100644 index 31490191c9..0000000000 --- a/changelog.d/13693.misc +++ /dev/null @@ -1 +0,0 @@ -Add cache to `is_partial_state_room`. diff --git a/changelog.d/13694.bugfix b/changelog.d/13694.bugfix deleted file mode 100644 index 48b9bb5f0a..0000000000 --- a/changelog.d/13694.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. diff --git a/changelog.d/13697.misc b/changelog.d/13697.misc deleted file mode 100644 index b9d2a60961..0000000000 --- a/changelog.d/13697.misc +++ /dev/null @@ -1 +0,0 @@ -Update the Grafana dashboard that is included with Synapse in the `contrib` directory. \ No newline at end of file diff --git a/changelog.d/13698.misc b/changelog.d/13698.misc deleted file mode 100644 index 20b6b6d084..0000000000 --- a/changelog.d/13698.misc +++ /dev/null @@ -1 +0,0 @@ -Only run trial CI on all python versions on non-PRs. diff --git a/changelog.d/13701.doc b/changelog.d/13701.doc deleted file mode 100644 index b438e066d8..0000000000 --- a/changelog.d/13701.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. diff --git a/changelog.d/13712.misc b/changelog.d/13712.misc deleted file mode 100644 index 2c4f6b19f6..0000000000 --- a/changelog.d/13712.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typechecking with latest types-jsonschema. diff --git a/changelog.d/13713.misc b/changelog.d/13713.misc deleted file mode 100644 index 1044099542..0000000000 --- a/changelog.d/13713.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of CI checks we run for PRs. diff --git a/debian/changelog b/debian/changelog index 2b7b329b6b..4fa54dc5ee 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -matrix-synapse-py3 (1.66.0ubuntu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium + [ Erik Johnston ] * Use stable poetry 1.2.0 version, rather than a prerelease. - -- Erik Johnston Thu, 01 Sep 2022 13:48:31 +0100 + [ Synapse Packaging team ] + * New Synapse release 1.67.0rc1. 
+ + -- Synapse Packaging team Tue, 06 Sep 2022 09:01:06 +0100 matrix-synapse-py3 (1.66.0) stable; urgency=medium diff --git a/pyproject.toml b/pyproject.toml index a41d88ea74..dfb5c788be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.66.0" +version = "1.67.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 571f565c1fa1e00462f9c96c3907d3304a37e39f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Sep 2022 09:25:32 +0100 Subject: Update changelog --- CHANGES.md | 13 +++++++++++++ docs/upgrade.md | 9 +++++++++ 2 files changed, 22 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 3bb00b6cea..f0d3db7849 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,19 @@ Synapse 1.67.0rc1 (2022-09-06) ============================== +This release removes using the deprecated direct TCP replication configuration +for workers. Server admins should use Redis instead. See the [upgrade +notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). + +The minimum version of `poetry` supported for managing source checkouts is now +1.2.0. + +Notice: from the next major release (v1.68.0) installing Synapse from a source +checkout will require a recent Rust compiler. Those using packages or +`pip install matrix-synapse` will not be affected. See the [upgrade +notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). + + Features -------- diff --git a/docs/upgrade.md b/docs/upgrade.md index c6219d06e8..023ca0a30b 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -111,6 +111,15 @@ and remove the TCP `replication` listener from config of the master and The minimum supported version of poetry is now 1.2. This should only affect those installing from a source checkout. +## Rust requirement in the next release + +From the next major release (v1.68.0) installing Synapse from a source checkout +will require a recent Rust compiler. Those using packages or +`pip install matrix-synapse` will not be affected. + +The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/) + + # Upgrading to v1.66.0 ## Delegation of email validation no longer supported -- cgit 1.4.1 From b455c2a5ec4874a6897c0448631d8ab8f36f9339 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 6 Sep 2022 11:21:21 +0000 Subject: Update Grafana dashboard to not use legacy metric names. (#13714) --- changelog.d/13714.misc | 1 + contrib/grafana/synapse.json | 138 +++++++++++++++++----------------- synapse/metrics/_legacy_exposition.py | 4 +- synapse/util/caches/__init__.py | 4 +- 4 files changed, 74 insertions(+), 73 deletions(-) create mode 100644 changelog.d/13714.misc diff --git a/changelog.d/13714.misc b/changelog.d/13714.misc new file mode 100644 index 0000000000..07ace50b12 --- /dev/null +++ b/changelog.d/13714.misc @@ -0,0 +1 @@ +Add experimental configuration option to allow disabling legacy Prometheus metric names. 
\ No newline at end of file diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 248cd6d9ad..58061e2fce 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -335,7 +335,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))", "hide": false, "instant": false, "legendFormat": "Events", @@ -1423,7 +1423,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "hide": false, "instant": false, @@ -1804,7 +1804,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)", + "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size])) without (job,index)", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -2437,7 +2437,7 @@ "uid": "$datasource" }, "exemplar": false, - "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds{instance=\"$instance\"}[1m]))", + "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))", "format": "time_series", "hide": false, "instant": false, @@ -2451,7 +2451,7 @@ "uid": "$datasource" }, "exemplar": false, - "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds{instance=\"$instance\"}[1m]))", + "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))", "format": "time_series", "hide": false, "instant": false, @@ -3425,7 +3425,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -3518,7 +3518,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_background_process_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -3726,7 +3726,7 @@ "datasource": 
{ "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "successful txn rate", @@ -3736,7 +3736,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_util_metrics_block_count_total{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))", "legendFormat": "failed txn rate", "refId": "B" } @@ -3826,7 +3826,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_federation_server_received_pdus_total{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "pdus", @@ -3836,7 +3836,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_federation_server_received_edus_total{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "edus", @@ -3928,7 +3928,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -3939,7 +3939,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_federation_client_sent_edus_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "edus", @@ -5042,7 +5042,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0", + "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -5054,7 +5054,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0", + "expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + 
synapse_http_httppusher_http_pushes_processed_total) > 0", "format": "time_series", "intervalFactor": 2, "legendFormat": "failed {{job}}", @@ -5268,12 +5268,12 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "{{index}}", - "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total", "refId": "A", "step": 2 } @@ -5369,12 +5369,12 @@ "uid": "$datasource" }, "exemplar": true, - "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "{{index}}", - "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total", "refId": "A", "step": 2 } @@ -5475,12 +5475,12 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "Hit Rate", - "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total", "refId": "A", "step": 2 }, @@ -5490,7 +5490,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -5598,12 +5598,12 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))", + "expr": 
"sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "Hit Rate", - "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total", "refId": "A", "step": 2 }, @@ -5613,7 +5613,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -5719,12 +5719,12 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "Hit Rate", - "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", + "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total", "refId": "A", "step": 2 }, @@ -5734,7 +5734,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))", + "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6087,7 +6087,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "expr": "topk(10, rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6187,7 +6187,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "instant": false, "interval": "", @@ -6287,7 +6287,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": 
"rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "instant": false, "interval": "", @@ -6538,7 +6538,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6636,7 +6636,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])", + "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])) / rate(synapse_util_metrics_block_count_total[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6737,7 +6737,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6839,7 +6839,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -6936,7 +6936,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -7033,7 +7033,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / 
rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_time_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -7122,7 +7122,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "interval": "", "legendFormat": "{{job}}-{{index}} {{block_name}}", "refId": "A" @@ -7246,7 +7246,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{name}} {{job}}-{{index}}", @@ -7347,7 +7347,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "synapse_util_caches_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "hide": false, "interval": "", @@ -7447,7 +7447,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -7547,7 +7547,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", + "expr": "topk(10, rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -7643,7 +7643,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_caches_cache_evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -7763,7 +7763,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "synapse_util_caches_response_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", "legendFormat": "{{name}} {{job}}-{{index}}", "refId": "A" @@ -7853,7 +7853,7 @@ "datasource": { "uid": "$datasource" }, - "expr": 
"rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_util_caches_response_cache_hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", "interval": "", "legendFormat": "{{name}} {{job}}-{{index}}", "refId": "A" @@ -9556,7 +9556,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", + "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", "intervalFactor": 1, "legendFormat": "{{le}}", @@ -9716,7 +9716,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", + "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", "intervalFactor": 1, "legendFormat": "{{le}}", @@ -9793,7 +9793,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "50%", @@ -9803,7 +9803,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "75%", @@ -9813,7 +9813,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "90%", @@ -9823,7 +9823,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.99, 
rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "99%", @@ -9905,7 +9905,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", + "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", "intervalFactor": 1, "legendFormat": "{{le}}", @@ -9982,7 +9982,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "50%", @@ -9992,7 +9992,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "75%", @@ -10002,7 +10002,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "90%", @@ -10012,7 +10012,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", "intervalFactor": 1, "legendFormat": "99%", @@ -10297,7 +10297,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", "legendFormat": 
"State res ", "refId": "A" @@ -10306,7 +10306,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", "legendFormat": "Potential to prune", "refId": "B" @@ -10315,7 +10315,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "expr": "sum(rate(synapse_storage_events_times_pruned_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", "legendFormat": "Pruned", "refId": "C" @@ -11069,7 +11069,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_handler_presence_notified_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Notified", "refId": "A" @@ -11078,7 +11078,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_handler_presence_federation_presence_out_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Remote ping", "refId": "B" @@ -11087,7 +11087,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_handler_presence_presence_updates_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Total updates", "refId": "C" @@ -11096,7 +11096,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_handler_presence_federation_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Remote updates", "refId": "D" @@ -11105,7 +11105,7 @@ "datasource": { "uid": "$datasource" }, - "expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_handler_presence_bump_active_time_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", "legendFormat": "Bump active time", "refId": "E" @@ -11789,7 +11789,7 @@ "name": "instance", "options": [], "query": { - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, instance)", "refId": "Prometheus-instance-Variable-Query" }, "refresh": 2, @@ -11818,7 +11818,7 @@ "name": "job", "options": [], "query": { - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, job)", "refId": "Prometheus-job-Variable-Query" }, "refresh": 2, @@ -11848,7 +11848,7 @@ "name": "index", "options": [], "query": { - "query": 
"label_values(synapse_util_metrics_block_ru_utime_seconds, index)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, index)", "refId": "Prometheus-index-Variable-Query" }, "refresh": 2, @@ -11896,6 +11896,6 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 132, + "version": 133, "weekStart": "" -} \ No newline at end of file +} diff --git a/synapse/metrics/_legacy_exposition.py b/synapse/metrics/_legacy_exposition.py index ff640a49af..6f00ff2a47 100644 --- a/synapse/metrics/_legacy_exposition.py +++ b/synapse/metrics/_legacy_exposition.py @@ -88,11 +88,11 @@ LEGACY_METRIC_NAMES = { "synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits", "synapse_util_caches_cache_size": "synapse_util_caches_cache:size", "synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size", - "synapse_util_caches_cache_total": "synapse_util_caches_cache:total", + "synapse_util_caches_cache": "synapse_util_caches_cache:total", "synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size", "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits", "synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size", - "synapse_util_caches_response_cache_total": "synapse_util_caches_response_cache:total", + "synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total", } diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index bdf9b0dc8c..d4a2b77c29 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -37,7 +37,7 @@ collectors_by_name: Dict[str, "CacheMetric"] = {} cache_size = Gauge("synapse_util_caches_cache_size", "", ["name"]) cache_hits = Gauge("synapse_util_caches_cache_hits", "", ["name"]) cache_evicted = Gauge("synapse_util_caches_cache_evicted_size", "", ["name", "reason"]) -cache_total = Gauge("synapse_util_caches_cache_total", "", ["name"]) +cache_total = Gauge("synapse_util_caches_cache", "", ["name"]) cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) cache_memory_usage = Gauge( "synapse_util_caches_cache_size_bytes", @@ -50,7 +50,7 @@ response_cache_hits = Gauge("synapse_util_caches_response_cache_hits", "", ["nam response_cache_evicted = Gauge( "synapse_util_caches_response_cache_evicted_size", "", ["name", "reason"] ) -response_cache_total = Gauge("synapse_util_caches_response_cache_total", "", ["name"]) +response_cache_total = Gauge("synapse_util_caches_response_cache", "", ["name"]) class EvictionReason(Enum): -- cgit 1.4.1 From b5effc72016021cc38f8d7949420d9246787fe11 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Sep 2022 12:43:04 +0100 Subject: Update trial old deps CI to use poetry 1.2.0 (#13707) --- .ci/scripts/prepare_old_deps.sh | 64 +++++++++++++++++++++++++++++++ .ci/scripts/test_old_deps.sh | 83 ----------------------------------------- .github/workflows/tests.yml | 45 ++++++++++++++++++---- changelog.d/13707.misc | 1 + 4 files changed, 103 insertions(+), 90 deletions(-) create mode 100755 .ci/scripts/prepare_old_deps.sh delete mode 100755 .ci/scripts/test_old_deps.sh create mode 100644 changelog.d/13707.misc diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh new file mode 100755 index 0000000000..7e4f060b17 --- /dev/null +++ b/.ci/scripts/prepare_old_deps.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# this script is run by GitHub Actions in a plain `focal` container; it +# - installs the 
minimal Python tooling it needs (toml, wheel and poetry); +# - patches the project definition file to refer to old versions only; +# - uses poetry to produce a lockfile pinned to these old versions. +# The CI workflow then installs the venv and invokes `trial` to run the tests. + +set -ex + +# Prevent virtualenv from auto-updating pip to an incompatible version +export VIRTUALENV_NO_DOWNLOAD=1 + +# TODO: in the future, we could use an implementation of +# https://github.com/python-poetry/poetry/issues/3527 +# https://github.com/pypa/pip/issues/8085 +# to select the lowest possible versions, rather than resorting to this sed script. + +# Patch the project definitions in-place: +# - Replace all lower and tilde bounds with exact bounds +# - Replace all caret bounds---but not the one that defines the supported Python version! +# - Delete all lines referring to psycopg2 --- so no testing of postgres support. +# - Use pyopenssl 17.0, which is the oldest version that works with +# a `cryptography` compiled against OpenSSL 1.1. +# - Omit systemd: we're not logging to journal here. + +sed -i \ + -e "s/[~>]=/==/g" \ + -e '/^python = "^/!s/\^/==/g' \ + -e "/psycopg2/d" \ + -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \ + -e '/systemd/d' \ + pyproject.toml + +# Use poetry to do the installation. This ensures that the versions are all mutually +# compatible (as far as the package metadata declares, anyway); pip's package resolver +# is more lax. +# +# Rather than `poetry install --no-dev`, we drop all dev dependencies from the +# toml file. This means we don't have to ensure compatibility between old deps and +# dev tools. + +pip install toml wheel + +REMOVE_DEV_DEPENDENCIES=" +import toml +with open('pyproject.toml', 'r') as f: + data = toml.loads(f.read()) + +del data['tool']['poetry']['dev-dependencies'] + +with open('pyproject.toml', 'w') as f: + toml.dump(data, f) +" +python3 -c "$REMOVE_DEV_DEPENDENCIES" + +pip install poetry==1.2.0 +poetry lock + +echo "::group::Patched pyproject.toml" +cat pyproject.toml +echo "::endgroup::" +echo "::group::Lockfile after patch" +cat poetry.lock +echo "::endgroup::" diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh deleted file mode 100755 index 478c8d639a..0000000000 --- a/.ci/scripts/test_old_deps.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash -# this script is run by GitHub Actions in a plain `focal` container; it -# - installs the minimal system requirements, and poetry; -# - patches the project definition file to refer to old versions only; -# - creates a venv with these old versions using poetry; and finally -# - invokes `trial` to run the tests with old deps. - -# Prevent tzdata from asking for user input -export DEBIAN_FRONTEND=noninteractive - -set -ex - -apt-get update -apt-get install -y \ - python3 python3-dev python3-pip python3-venv pipx \ - libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev - -export LANG="C.UTF-8" - -# Prevent virtualenv from auto-updating pip to an incompatible version -export VIRTUALENV_NO_DOWNLOAD=1 - -# TODO: in the future, we could use an implementation of -# https://github.com/python-poetry/poetry/issues/3527 -# https://github.com/pypa/pip/issues/8085 -# to select the lowest possible versions, rather than resorting to this sed script. - -# Patch the project definitions in-place: -# - Replace all lower and tilde bounds with exact bounds -# - Replace all caret bounds---but not the one that defines the supported Python version! -# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
-# - Use pyopenssl 17.0, which is the oldest version that works with -# a `cryptography` compiled against OpenSSL 1.1. -# - Omit systemd: we're not logging to journal here. - -# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints -# We don't use these yet, but IIRC they are the default bound used when you `poetry add`. -# The sed expression 's/\^/==/g' ought to do the trick. But it would also change -# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps -# runs on 3.8 (#12343). - -sed -i \ - -e "s/[~>]=/==/g" \ - -e '/^python = "^/!s/\^/==/g' \ - -e "/psycopg2/d" \ - -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \ - -e '/systemd/d' \ - pyproject.toml - -# Use poetry to do the installation. This ensures that the versions are all mutually -# compatible (as far the package metadata declares, anyway); pip's package resolver -# is more lax. -# -# Rather than `poetry install --no-dev`, we drop all dev dependencies from the -# toml file. This means we don't have to ensure compatibility between old deps and -# dev tools. - -pip install --user toml - -REMOVE_DEV_DEPENDENCIES=" -import toml -with open('pyproject.toml', 'r') as f: - data = toml.loads(f.read()) - -del data['tool']['poetry']['dev-dependencies'] - -with open('pyproject.toml', 'w') as f: - toml.dump(data, f) -" -python3 -c "$REMOVE_DEV_DEPENDENCIES" - -pipx install poetry==1.1.14 -~/.local/bin/poetry lock - -echo "::group::Patched pyproject.toml" -cat pyproject.toml -echo "::endgroup::" -echo "::group::Lockfile after patch" -cat poetry.lock -echo "::endgroup::" - -~/.local/bin/poetry install -E "all test" -~/.local/bin/poetry run trial --jobs=2 tests diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bc1de2893c..16fb4b43e2 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -135,16 +135,47 @@ jobs: # Note: sqlite only; no postgres if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail needs: linting-done - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - - name: Test with old deps - uses: docker://ubuntu:focal # For old python and sqlite - # Note: focal seems to be using 3.8, but the oldest is 3.7? - # See https://github.com/matrix-org/synapse/issues/12343 + + # There aren't wheels for some of the older deps, so we need to install + # their build dependencies + - run: | + sudo apt-get -qq install build-essential libffi-dev python-dev \ + libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev + + - uses: actions/setup-python@v4 + with: + python-version: '3.7' + + # Calculating the old-deps actually takes a bunch of time, so we cache the + # pyproject.toml / poetry.lock. We need to cache pyproject.toml as + # otherwise the `poetry install` step will error due to the poetry.lock + # file being outdated. + # + # This caches the output of `Prepare old deps`, which should generate the + # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input. 
+ - uses: actions/cache@v3 + id: cache-poetry-old-deps + name: Cache poetry.lock with: - workdir: /github/workspace - entrypoint: .ci/scripts/test_old_deps.sh + path: | + poetry.lock + pyproject.toml + key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }} + - name: Prepare old deps + if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true' + run: .ci/scripts/prepare_old_deps.sh + + # We only now install poetry so that `setup-python-poetry` caches the + # right poetry.lock's dependencies. + - uses: matrix-org/setup-python-poetry@v1 + with: + python-version: '3.7' + extras: "all test" + + - run: poetry run trial -j 2 tests - name: Dump logs # Logs are most useful when the command fails, always include them. if: ${{ always() }} diff --git a/changelog.d/13707.misc b/changelog.d/13707.misc new file mode 100644 index 0000000000..e72c322d2e --- /dev/null +++ b/changelog.d/13707.misc @@ -0,0 +1 @@ +Update trial old deps CI to use poetry 1.2.0. -- cgit 1.4.1 From ec2fe7bb53f3e37694da421f37be709b19aea499 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Sep 2022 13:04:22 +0100 Subject: Fixup changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f0d3db7849..ad277f32e5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -30,9 +30,9 @@ Bugfixes - Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583)) - Fix loading the current stream position behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585)) - Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616)) -- Fix the running of MSC1763 retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632)) +- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632)) - Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657)) -- Fix MSC3030 `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658)) +- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658)) - Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660)) - Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. 
([\#13683](https://github.com/matrix-org/synapse/issues/13683)) - Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694)) -- cgit 1.4.1 From a4ecb8e35309d780f5d4e93fb4998b90c9068e8a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 6 Sep 2022 14:29:16 +0100 Subject: Actually fix typechecking with latest types-jsonschema (#13724) --- changelog.d/13724.misc | 1 + synapse/api/filtering.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13724.misc diff --git a/changelog.d/13724.misc b/changelog.d/13724.misc new file mode 100644 index 0000000000..2c4f6b19f6 --- /dev/null +++ b/changelog.d/13724.misc @@ -0,0 +1 @@ +Fix typechecking with latest types-jsonschema. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 102889ac49..f7f46f8d80 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -140,13 +140,13 @@ USER_FILTER_SCHEMA = { @FormatChecker.cls_checks("matrix_room_id") -def matrix_room_id_validator(room_id_str: str) -> bool: - return RoomID.is_valid(room_id_str) +def matrix_room_id_validator(room_id: object) -> bool: + return isinstance(room_id, str) and RoomID.is_valid(room_id) @FormatChecker.cls_checks("matrix_user_id") -def matrix_user_id_validator(user_id_str: str) -> bool: - return UserID.is_valid(user_id_str) +def matrix_user_id_validator(user_id: object) -> bool: + return isinstance(user_id, str) and UserID.is_valid(user_id) class Filtering: -- cgit 1.4.1 From 3d201151152ca8ba9b9aae8da5b76a26044cc85f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Sep 2022 15:21:55 +0100 Subject: Fix trial-olddeps (#13725) --- changelog.d/13725.misc | 1 + poetry.lock | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13725.misc diff --git a/changelog.d/13725.misc b/changelog.d/13725.misc new file mode 100644 index 0000000000..e72c322d2e --- /dev/null +++ b/changelog.d/13725.misc @@ -0,0 +1 @@ +Update trial old deps CI to use poetry 1.2.0. 
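A note on the jsonschema format-checker pattern used in #13724 above: `jsonschema` may invoke a format checker with values of any type, not just strings, so checkers must tolerate arbitrary input rather than assume `str`. The following is a self-contained sketch of the pattern, not Synapse code; the `matrix_room_id` format name and the trivial `!`-prefix check are illustrative assumptions, not Synapse's real filter schema.

```python
from jsonschema import Draft7Validator, FormatChecker

checker = FormatChecker()


@checker.checks("matrix_room_id")
def check_room_id(value: object) -> bool:
    # Non-strings simply fail the format check; the schema's "type"
    # keyword reports its own error for them in any case.
    return isinstance(value, str) and value.startswith("!")


schema = {"type": "string", "format": "matrix_room_id"}
validator = Draft7Validator(schema, format_checker=checker)

assert not list(validator.iter_errors("!room:example.org"))  # valid
assert list(validator.iter_errors(42))  # invalid, but no TypeError is raised
```

Returning `False` for non-strings keeps the checker total over arbitrary input, which is exactly why the validators above take `object` and guard with `isinstance`.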
diff --git a/poetry.lock b/poetry.lock index 35021390bf..44df7d395c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1600,7 +1600,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "7de518bf27967b3547eab8574342cfb67f87d6b47b4145c13de11112141dbf2d" +content-hash = "0df36bf75561fef340a7af704ed379b235f07a7d4a231aaccec5e7afb87159ca" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index a41d88ea74..8b2b5060b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,7 +126,7 @@ pyOpenSSL = ">=16.0.0" PyYAML = ">=3.11" pyasn1 = ">=0.1.9" pyasn1-modules = ">=0.0.7" -bcrypt = ">=3.1.0" +bcrypt = ">=3.1.7" Pillow = ">=5.4.0" sortedcontainers = ">=1.4.4" pymacaroons = ">=0.13.0" -- cgit 1.4.1 From c9b7e9735508bb148c6ad59c433d71e5b8b360ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Sep 2022 19:01:37 +0100 Subject: Add a stub Rust crate (#12595) --- .dockerignore | 4 ++ .github/workflows/release-artifacts.yml | 65 +++++++++++++++++++++++++++++++-- .github/workflows/tests.yml | 19 +++++++++- .gitignore | 7 ++++ Cargo.toml | 5 +++ build_rust.py | 20 ++++++++++ changelog.d/12595.misc | 1 + debian/build_virtualenv | 7 +++- debian/changelog | 4 ++ debian/rules | 2 + docker/Dockerfile | 14 ++++++- docker/Dockerfile-dhvirtualenv | 10 +++++ docs/deprecation_policy.md | 13 +++++++ docs/development/contributing_guide.md | 10 ++++- docs/setup/installation.md | 4 ++ mypy.ini | 6 ++- poetry.lock | 35 +++++++++++++++++- pyproject.toml | 39 +++++++++++++++++++- rust/Cargo.toml | 21 +++++++++++ rust/src/lib.rs | 15 ++++++++ stubs/synapse/__init__.pyi | 0 stubs/synapse/synapse_rust.pyi | 1 + tests/test_rust.py | 11 ++++++ 23 files changed, 302 insertions(+), 11 deletions(-) create mode 100644 Cargo.toml create mode 100644 build_rust.py create mode 100644 changelog.d/12595.misc create mode 100644 rust/Cargo.toml create mode 100644 rust/src/lib.rs create mode 100644 stubs/synapse/__init__.pyi create mode 100644 stubs/synapse/synapse_rust.pyi create mode 100644 tests/test_rust.py diff --git a/.dockerignore b/.dockerignore index 7809863ef3..8eb1e4df8a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,8 +4,12 @@ # things to include !docker !synapse +!rust !README.rst !pyproject.toml !poetry.lock +!build_rust.py + +rust/target **/__pycache__ diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index ed4fc6179d..0708d631cd 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -15,7 +15,7 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true - + permissions: contents: write @@ -89,9 +89,67 @@ jobs: name: debs path: debs/* + build-wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04, macos-10.15] + is_pr: + - ${{ startsWith(github.ref, 'refs/pull/') }} + + exclude: + # Don't build macos wheels on PR CI. + - is_pr: true + os: "macos-10.15" + + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-python@v3 + + - name: Install cibuildwheel + run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0 + + # Only build a single wheel in CI. + - name: Set env vars. 
+ run: | + echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV + if: startsWith(github.ref, 'refs/pull/') + + - name: Build wheels + run: python -m cibuildwheel --output-dir wheelhouse + env: + # Skip testing for platforms which various libraries don't have wheels + # for, and so need extra build deps. + CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx* + + - uses: actions/upload-artifact@v3 + with: + name: Wheel + path: ./wheelhouse/*.whl + build-sdist: - name: "Build pypi distribution files" - uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1" + name: Build sdist + runs-on: ubuntu-latest + if: ${{ !startsWith(github.ref, 'refs/pull/') }} + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - run: pip install build + + - name: Build sdist + run: python -m build --sdist + + - uses: actions/upload-artifact@v2 + with: + name: Sdist + path: dist/*.tar.gz + # if it's a tag, create a release and attach the artifacts to it attach-assets: @@ -99,6 +157,7 @@ jobs: if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }} needs: - build-debs + - build-wheels - build-sdist runs-on: ubuntu-latest steps: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 16fb4b43e2..5f96bdfa7f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -139,6 +139,12 @@ jobs: steps: - uses: actions/checkout@v2 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.61.0 + override: true + # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | @@ -175,7 +181,7 @@ jobs: python-version: '3.7' extras: "all test" - - run: poetry run trial -j 2 tests + - run: poetry run trial -j2 tests - name: Dump logs # Logs are most useful when the command fails, always include them. if: ${{ always() }} @@ -247,6 +253,11 @@ jobs: - uses: actions/checkout@v2 - name: Prepare test blacklist run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.61.0 + override: true - name: Run SyTest run: /bootstrap.sh synapse working-directory: /src @@ -353,6 +364,12 @@ jobs: with: path: synapse + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.61.0 + override: true + - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh diff --git a/.gitignore b/.gitignore index e58affb241..31a60bb7bd 100644 --- a/.gitignore +++ b/.gitignore @@ -60,3 +60,10 @@ book/ # complement /complement-* /master.tar.gz + +# rust +/target/ +/synapse/*.so + +# Poetry will create a setup.py, which we don't want to include. +/setup.py diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000000..de141bdee9 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,5 @@ +# We make the whole Synapse folder a workspace so that we can run `cargo` +# commands from the root (rather than having to cd into rust/). + +[workspace] +members = ["rust"] diff --git a/build_rust.py b/build_rust.py new file mode 100644 index 0000000000..5c5e557ee8 --- /dev/null +++ b/build_rust.py @@ -0,0 +1,20 @@ +# A build script for poetry that adds the rust extension. 
+ +import os +from typing import Any, Dict + +from setuptools_rust import Binding, RustExtension + + +def build(setup_kwargs: Dict[str, Any]) -> None: + original_project_dir = os.path.dirname(os.path.realpath(__file__)) + cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml") + + extension = RustExtension( + target="synapse.synapse_rust", + path=cargo_toml_path, + binding=Binding.PyO3, + py_limited_api=True, + ) + setup_kwargs.setdefault("rust_extensions", []).append(extension) + setup_kwargs["zip_safe"] = False diff --git a/changelog.d/12595.misc b/changelog.d/12595.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/12595.misc @@ -0,0 +1 @@ +Add a stub Rust crate. diff --git a/debian/build_virtualenv b/debian/build_virtualenv index ed916ac97a..dd97e888ba 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -61,7 +61,7 @@ dh_virtualenv \ --extras="all,systemd,test" \ --requirements="exported_requirements.txt" -PACKAGE_BUILD_DIR="debian/matrix-synapse-py3" +PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3" VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse" TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python" @@ -78,9 +78,14 @@ case "$DEB_BUILD_OPTIONS" in cp -r tests "$tmpdir" + # To avoid pulling in the unbuilt Synapse in the local directory + pushd / + PYTHONPATH="$tmpdir" \ "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests + popd + ;; esac diff --git a/debian/changelog b/debian/changelog index 2b7b329b6b..bd2d56e738 100644 --- a/debian/changelog +++ b/debian/changelog @@ -12,11 +12,15 @@ matrix-synapse-py3 (1.66.0) stable; urgency=medium matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium + [ Jörg Behrmann ] * Update debhelper to compatibility level 12. * Drop the preinst script stopping synapse. * Allocate a group for the system user. * Change dpkg-statoverride to --force-statoverride-add. + [ Erik Johnston ] + * Disable `dh_auto_configure` as it broke during Rust build. + -- Jörg Behrmann Tue, 23 Aug 2022 17:17:00 +0100 matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium diff --git a/debian/rules b/debian/rules index 3b79d56074..914d068f2a 100755 --- a/debian/rules +++ b/debian/rules @@ -12,6 +12,8 @@ override_dh_installsystemd: # we don't really want to strip the symbols from our object files. override_dh_strip: +override_dh_auto_configure: + # many libraries pulled from PyPI have allocatable sections after # non-allocatable ones on which dwz errors out. For those without the issue the # gains are only marginal diff --git a/docker/Dockerfile b/docker/Dockerfile index b87d263cff..a057bf397b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -92,11 +92,20 @@ RUN \ libxml++2.6-dev \ libxslt1-dev \ openssl \ - rustc \ zlib1g-dev \ git \ + curl \ && rm -rf /var/lib/apt/lists/* + +# Install rust and ensure its in the PATH +ENV RUSTUP_HOME=/rust +ENV CARGO_HOME=/cargo +ENV PATH=/cargo/bin:/rust/bin:$PATH +RUN mkdir /rust /cargo + +RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable + # To speed up rebuilds, install all of the dependencies before we copy over # the whole synapse project, so that this layer in the Docker cache can be # used while you develop on the source @@ -108,8 +117,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \ # Copy over the rest of the synapse source code. COPY synapse /synapse/synapse/ +COPY rust /synapse/rust/ # ... and what we need to `pip install`. 
-COPY pyproject.toml README.rst /synapse/ +COPY pyproject.toml README.rst build_rust.py /synapse/ # Repeat of earlier build argument declaration, as this is a new build stage. ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index fbc1d2346f..ca3a259081 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -72,6 +72,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \ && env DEBIAN_FRONTEND=noninteractive apt-get install \ -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \ build-essential \ + curl \ debhelper \ devscripts \ libsystemd-dev \ @@ -85,6 +86,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \ libpq-dev \ xmlsec1 +# Install rust and ensure it's in the PATH +ENV RUSTUP_HOME=/rust +ENV CARGO_HOME=/cargo +ENV PATH=/cargo/bin:/rust/bin:$PATH +RUN mkdir /rust /cargo + +RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable + + COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb / # install dhvirtualenv. Update the apt cache again first, in case we got a diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md index 359dac07c3..b8a46e3d60 100644 --- a/docs/deprecation_policy.md +++ b/docs/deprecation_policy.md @@ -18,6 +18,12 @@ documented at [https://endoflife.date/python](https://endoflife.date/python) and [https://endoflife.date/postgresql](https://endoflife.date/postgresql). +A Rust compiler is required to build Synapse from source. For any given release +the minimum required version may be bumped up to a recent Rust version, and so +people building from source should ensure they can fetch recent versions of Rust +(e.g. by using [rustup](https://rustup.rs/)). + + Context ------- @@ -31,3 +37,10 @@ long process. By following the upstream support life cycles Synapse can ensure that its dependencies continue to get security patches, while not requiring system admins to constantly update their platform dependencies to the latest versions. + +For Rust, the situation is a bit different given that a) the Rust foundation +does not generally support older Rust versions, and b) the library ecosystem +generally bumps its minimum supported Rust version frequently. In general, the +Synapse team will try to avoid updating the dependency on Rust to the absolute +latest version, but introducing a formal policy is hard given the constraints of +the ecosystem. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 4e1df51164..cb0d727efa 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -28,6 +28,9 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/). +A recent version of the Rust compiler is needed to build the native modules. The +easiest way of installing the latest version is to use [rustup](https://rustup.rs/). + # 3. Get the source. @@ -114,6 +117,11 @@ Some documentation also exists in [Synapse's GitHub Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily contributed to by community authors. +When changes are made to any Rust code, you must run either `poetry install` +or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin) +is quicker than `poetry install`, so is recommended when making frequent +changes to the Rust code.
+ # 8. Test, test, test! @@ -195,7 +203,7 @@ The database file can then be inspected with: sqlite3 _trial_temp/test.db ``` -Note that the database file is cleared at the beginning of each test run. Thus it +Note that the database file is cleared at the beginning of each test run. Thus it will always only contain the data generated by the *last run test*. Though generally when debugging, one is only running a single test anyway. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index bb78b3267a..90737520ba 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -196,6 +196,10 @@ System requirements: - Python 3.7 or later, up to Python 3.10. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org +If building on an uncommon architecture for which pre-built wheels are +unavailable, you will need to have a recent Rust compiler installed. The easiest +way of installing the latest version is to use [rustup](https://rustup.rs/). + To install the Synapse homeserver run: ```sh diff --git a/mypy.ini b/mypy.ini index e2034e411f..64f9097206 100644 --- a/mypy.ini +++ b/mypy.ini @@ -16,7 +16,8 @@ files = docker/, scripts-dev/, synapse/, - tests/ + tests/, + build_rust.py # Note: Better exclusion syntax coming in mypy > 0.910 # https://github.com/python/mypy/pull/11329 @@ -181,3 +182,6 @@ ignore_missing_imports = True [mypy-incremental.*] ignore_missing_imports = True + +[mypy-setuptools_rust.*] +ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index 44df7d395c..cdc69f8ea9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1035,6 +1035,18 @@ python-versions = ">=3.6" cryptography = ">=2.0" jeepney = ">=0.6" +[[package]] +name = "semantic-version" +version = "2.10.0" +description = "A library implementing the 'SemVer' scheme." 
+category = "main" +optional = false +python-versions = ">=2.7" + +[package.extras] +dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] +doc = ["Sphinx", "sphinx-rtd-theme"] + [[package]] name = "sentry-sdk" version = "1.5.11" @@ -1099,6 +1111,19 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-g testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +[[package]] +name = "setuptools-rust" +version = "1.5.1" +description = "Setuptools Rust extension plugin" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +semantic-version = ">=2.8.2,<3" +setuptools = ">=62.4" +typing-extensions = ">=3.7.4.3" + [[package]] name = "signedjson" version = "1.1.4" @@ -1600,7 +1625,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "0df36bf75561fef340a7af704ed379b235f07a7d4a231aaccec5e7afb87159ca" +content-hash = "79cfa09d59f9f8b5ef24318fb860df1915f54328692aa56d04331ecbdd92a8cb" [metadata.files] attrs = [ @@ -2472,6 +2497,10 @@ secretstorage = [ {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, ] +semantic-version = [ + {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, + {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, +] sentry-sdk = [ {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"}, {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"}, @@ -2484,6 +2513,10 @@ setuptools = [ {file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"}, {file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"}, ] +setuptools-rust = [ + {file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"}, + {file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"}, +] signedjson = [ {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"}, {file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"}, diff --git a/pyproject.toml b/pyproject.toml index 8b2b5060b1..7cc9de5bc7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,9 @@ 
include_trailing_comma = true combine_as_imports = true skip_gitignore = true +[tool.maturin] +manifest-path = "rust/Cargo.toml" + [tool.poetry] name = "matrix-synapse" version = "1.66.0" @@ -82,8 +85,17 @@ include = [ { path = "sytest-blacklist", format = "sdist" }, { path = "tests", format = "sdist" }, { path = "UPGRADE.rst", format = "sdist" }, + { path = "Cargo.toml", format = "sdist" }, + { path = "rust/Cargo.toml", format = "sdist" }, + { path = "rust/Cargo.lock", format = "sdist" }, + { path = "rust/src/**", format = "sdist" }, +] +exclude = [ + { path = "synapse/*.so", format = "sdist"} ] +build = "build_rust.py" + [tool.poetry.scripts] synapse_homeserver = "synapse.app.homeserver:main" synapse_worker = "synapse.app.generic_worker:main" @@ -161,6 +173,15 @@ importlib_metadata = { version = ">=1.4", python = "<3.8" } # This is the most recent version of Pydantic available on common distros. pydantic = ">=1.7.4" +# This is for building the rust components during "poetry install", which +# currently ignores the `build-system.requires` directive (c.f. +# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and +# `poetry build` do the right thing without this explicit dependency. +# +# This isn't really a dev-dependency, as `poetry install --no-dev` will fail, +# but the alternative is to add it to the main list of deps where it isn't +# needed. +setuptools_rust = ">=1.3" # Optional Dependencies @@ -285,5 +306,21 @@ twine = "*" towncrier = ">=18.6.0rc1" [build-system] -requires = ["poetry-core>=1.0.0"] +requires = ["poetry-core>=1.0.0", "setuptools_rust>=1.3"] build-backend = "poetry.core.masonry.api" + + +[tool.cibuildwheel] +# Skip unsupported platforms (by us or by Rust). +skip = "cp36* *-musllinux_i686" + +# We need a rust compiler +before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y" +environment= { PATH = "$PATH:$HOME/.cargo/bin" } + +# For some reason if we don't manually clean the build directory we +# can end up polluting the next build with a .so that is for the wrong +# Python version. +before-build = "rm -rf {project}/build" +build-frontend = "build" +test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'" diff --git a/rust/Cargo.toml b/rust/Cargo.toml new file mode 100644 index 0000000000..0a9760cafc --- /dev/null +++ b/rust/Cargo.toml @@ -0,0 +1,21 @@ +[package] +# We name the package `synapse` so that things like logging have the right +# logging target. +name = "synapse" + +# dummy version. See pyproject.toml for Synapse's version number. +version = "0.1.0" + +edition = "2021" +rust-version = "1.61.0" + +[lib] +name = "synapse" +crate-type = ["cdylib"] + +[package.metadata.maturin] +# This is where we tell maturin where to place the built library. +name = "synapse.synapse_rust" + +[dependencies] +pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] } diff --git a/rust/src/lib.rs b/rust/src/lib.rs new file mode 100644 index 0000000000..fc4eb39154 --- /dev/null +++ b/rust/src/lib.rs @@ -0,0 +1,15 @@ +use pyo3::prelude::*; + +/// Formats the sum of two numbers as a string. +#[pyfunction] +#[pyo3(text_signature = "(a, b, /)")] +fn sum_as_string(a: usize, b: usize) -> PyResult<String> { + Ok((a + b).to_string()) +} + +/// The entry point for defining the Python module.
+#[pymodule] +fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> { + m.add_function(wrap_pyfunction!(sum_as_string, m)?)?; + Ok(()) +} diff --git a/stubs/synapse/__init__.pyi b/stubs/synapse/__init__.pyi new file mode 100644 index 0000000000..e69de29bb2 diff --git a/stubs/synapse/synapse_rust.pyi b/stubs/synapse/synapse_rust.pyi new file mode 100644 index 0000000000..5b51ba05d7 --- /dev/null +++ b/stubs/synapse/synapse_rust.pyi @@ -0,0 +1 @@ +def sum_as_string(a: int, b: int) -> str: ... diff --git a/tests/test_rust.py b/tests/test_rust.py new file mode 100644 index 0000000000..55d8b6b28c --- /dev/null +++ b/tests/test_rust.py @@ -0,0 +1,11 @@ +from synapse.synapse_rust import sum_as_string + +from tests import unittest + + +class RustTestCase(unittest.TestCase): + """Basic tests to ensure that we can call into Rust code.""" + + def test_basic(self): + result = sum_as_string(1, 2) + self.assertEqual("3", result) -- cgit 1.4.1 From 26bc26586b4b95d63ce7e453e9312469843f796e Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 6 Sep 2022 19:28:44 +0000 Subject: Remove the unspecced room_id field in the /hierarchy response. (#13506) This is a re-do of 57d334a13d983406ea452dfa203bbe4837509c4e (#13365), which was backed out in 12abd724974a2311d5311272d26d2f8aa11734a9 (#13501). The `room_id` field represented the parent space for each room and was made redundant by changes in the API shape where the `children_state` is now nested underneath each `room`. The room ID of each child is in the `state_key` field and is still available. --- changelog.d/13506.bugfix | 1 + synapse/handlers/room_summary.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/13506.bugfix diff --git a/changelog.d/13506.bugfix b/changelog.d/13506.bugfix new file mode 100644 index 0000000000..2e43668865 --- /dev/null +++ b/changelog.d/13506.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). \ No newline at end of file diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 732b0310bc..ebd445adca 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -453,7 +453,6 @@ class RoomSummaryHandler: "type": e.type, "state_key": e.state_key, "content": e.content, - "room_id": e.room_id, "sender": e.sender, "origin_server_ts": e.origin_server_ts, } -- cgit 1.4.1 From bb5b47b62a11b14a3458e5a8aafd9ddaf1294199 Mon Sep 17 00:00:00 2001 From: Connor Davis Date: Wed, 7 Sep 2022 05:54:44 -0400 Subject: Add Admin API to Fetch Messages Within a Particular Window (#13672) This adds two new admin APIs that allow us to fetch messages from a room within a particular time. --- changelog.d/13672.feature | 1 + docs/admin_api/rooms.md | 145 +++++++++++++++++++++++++++++++++++++ synapse/handlers/pagination.py | 37 ++++++---- synapse/rest/admin/__init__.py | 4 ++ synapse/rest/admin/rooms.py | 104 +++++++++++++++++++++++++++ tests/rest/admin/test_room.py | 158 ++++++++++++++++++++++++++++++++++++++++- 6 files changed, 435 insertions(+), 14 deletions(-) create mode 100644 changelog.d/13672.feature diff --git a/changelog.d/13672.feature b/changelog.d/13672.feature new file mode 100644 index 0000000000..2334e6fe15 --- /dev/null +++ b/changelog.d/13672.feature @@ -0,0 +1 @@ +Add admin APIs to fetch messages within a particular window of time. 
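To make the behaviour of #13672's new endpoint concrete before the documentation diff below: pagination works like the client-server `/messages` API, and the `end` token is omitted once history is exhausted. A rough usage sketch follows; the homeserver URL, room ID and access token are placeholder assumptions, not real values.

```python
from urllib.parse import quote

import requests

BASE = "https://synapse.example.com"  # assumption: your homeserver
ROOM_ID = "!636q39766251:example.com"  # assumption: the docs' example room
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # admin token


def fetch_history(from_token: str) -> list:
    """Page backwards until the response omits `end` (history exhausted)."""
    events = []
    token = from_token
    while token is not None:
        resp = requests.get(
            f"{BASE}/_synapse/admin/v1/rooms/{quote(ROOM_ID, safe='')}/messages",
            params={"from": token, "dir": "b", "limit": 100},
            headers=HEADERS,
        )
        resp.raise_for_status()
        body = resp.json()
        events.extend(body["chunk"])
        token = body.get("end")  # omitted once no further events remain
    return events
```

The initial `from` token would typically come from a `/sync` response's `prev_batch` or from the `end` of a previous call, as the documentation below describes.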
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 7526956bec..8f727b363e 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -393,6 +393,151 @@ A response body like the following is returned: } ``` +# Room Messages API + +The Room Messages admin API allows server admins to get all messages +sent to a room in a given timeframe. There are various parameters available +that allow for filtering and ordering the returned list. This API supports pagination. + +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + +This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages). + +The API is: +``` +GET /_synapse/admin/v1/rooms/<room_id>/messages +``` + +**Parameters** + +The following path parameters are required: + +* `room_id` - The ID of the room you wish to fetch messages from. + +The following query parameters are available: + +* `from` (required) - The token to start returning events from. This token can be obtained from a `prev_batch` + or `next_batch` token returned by the `/sync` endpoint, or from an `end` token returned by a previous request to this endpoint. +* `to` - The token to stop returning events at. +* `limit` - The maximum number of events to return. Defaults to `10`. +* `filter` - A JSON RoomEventFilter to filter returned events with. +* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting + this value to `b` will reverse the sort order of the returned events. Defaults to `f`. + +**Response** + +The following fields are possible in the JSON response body: + +* `chunk` - A list of room events. The order depends on the `dir` parameter. + Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no `end` property is returned. +* `end` - A token corresponding to the end of `chunk`. This token can be passed back to this endpoint to request further events. + If no further events are available, this property is omitted from the response. +* `start` - A token corresponding to the start of `chunk`. +* `state` - A list of state events relevant to showing the `chunk`. + +**Example** + +For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
+ +```json +{ + "chunk": [ + { + "content": { + "body": "This is an example text message", + "format": "org.matrix.custom.html", + "formatted_body": "<b>This is an example text message</b>", + "msgtype": "m.text" + }, + "event_id": "$143273582443PhrSn:example.org", + "origin_server_ts": 1432735824653, + "room_id": "!636q39766251:example.com", + "sender": "@example:example.org", + "type": "m.room.message", + "unsigned": { + "age": 1234 + } + }, + { + "content": { + "name": "The room name" + }, + "event_id": "$143273582443PhrSn:example.org", + "origin_server_ts": 1432735824653, + "room_id": "!636q39766251:example.com", + "sender": "@example:example.org", + "state_key": "", + "type": "m.room.name", + "unsigned": { + "age": 1234 + } + }, + { + "content": { + "body": "Gangnam Style", + "info": { + "duration": 2140786, + "h": 320, + "mimetype": "video/mp4", + "size": 1563685, + "thumbnail_info": { + "h": 300, + "mimetype": "image/jpeg", + "size": 46144, + "w": 300 + }, + "thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe", + "w": 480 + }, + "msgtype": "m.video", + "url": "mxc://example.org/a526eYUSFFxlgbQYZmo442" + }, + "event_id": "$143273582443PhrSn:example.org", + "origin_server_ts": 1432735824653, + "room_id": "!636q39766251:example.com", + "sender": "@example:example.org", + "type": "m.room.message", + "unsigned": { + "age": 1234 + } + } + ], + "end": "t47409-4357353_219380_26003_2265", + "start": "t47429-4392820_219380_26003_2265" +} +``` + +# Room Timestamp to Event API + +The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given +timestamp (`ts` query parameter) in the given direction (`dir` query parameter). + +Useful for cases like jump to date so you can start paginating messages from +a given date in the archive. + +The API is: +``` + GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event +``` + +**Parameters** + +The following path parameters are required: + +* `room_id` - The ID of the room you wish to check. + +The following query parameters are available: + +* `ts` - a timestamp in milliseconds where we will find the closest event in + the given direction. +* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the + given timestamp. Defaults to `f`. + +**Response** + +* `event_id` - The ID of the event closest to the given timestamp. +* `origin_server_ts` - The timestamp of that event, in milliseconds. + # Block Room API The Block Room admin API allows server admins to block and unblock rooms, and query to see if a given room is blocked. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a0c39778ab..1f83bab836 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -26,6 +26,7 @@ from synapse.events.utils import SerializeEventConfig from synapse.handlers.room import ShutdownRoomResponse from synapse.logging.opentracing import trace from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.rest.admin._base import assert_user_is_admin from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, Requester, StreamKeyType @@ -423,6 +424,7 @@ class PaginationHandler: pagin_config: PaginationConfig, as_client_event: bool = True, event_filter: Optional[Filter] = None, + use_admin_priviledge: bool = False, ) -> JsonDict: """Get messages in a room. @@ -432,10 +434,16 @@ pagin_config: The pagination config rules to apply, if any. as_client_event: True to get events in client-server format.
event_filter: Filter to apply to results or None + use_admin_priviledge: if `True`, return all events, regardless + of whether `user` has access to them. To be used **ONLY** + from the admin API. Returns: Pagination API results """ + if use_admin_priviledge: + await assert_user_is_admin(self.auth, requester) + user_id = requester.user.to_string() if pagin_config.from_token: @@ -458,12 +466,14 @@ class PaginationHandler: room_token = from_token.room_key async with self.pagination_lock.read(room_id): - ( - membership, - member_event_id, - ) = await self.auth.check_user_in_room_or_world_readable( - room_id, requester, allow_departed_users=True - ) + (membership, member_event_id) = (None, None) + if not use_admin_priviledge: + ( + membership, + member_event_id, + ) = await self.auth.check_user_in_room_or_world_readable( + room_id, requester, allow_departed_users=True + ) if pagin_config.direction == "b": # if we're going backwards, we might need to backfill. This @@ -475,7 +485,7 @@ class PaginationHandler: room_id, room_token.stream ) - if membership == Membership.LEAVE: + if not use_admin_priviledge and membership == Membership.LEAVE: # If they have left the room then clamp the token to be before # they left the room, to save the effort of loading from the # database. @@ -528,12 +538,13 @@ class PaginationHandler: if event_filter: events = await event_filter.filter(events) - events = await filter_events_for_client( - self._storage_controllers, - user_id, - events, - is_peeking=(member_event_id is None), - ) + if not use_admin_priviledge: + events = await filter_events_for_client( + self._storage_controllers, + user_id, + events, + is_peeking=(member_event_id is None), + ) # if after the filter applied there are no more events # return immediately - but there might be more in next_token batch diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index fa3266720b..bac754e1b1 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -61,9 +61,11 @@ from synapse.rest.admin.rooms import ( MakeRoomAdminRestServlet, RoomEventContextServlet, RoomMembersRestServlet, + RoomMessagesRestServlet, RoomRestServlet, RoomRestV2Servlet, RoomStateRestServlet, + RoomTimestampToEventRestServlet, ) from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet @@ -271,6 +273,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: DestinationResetConnectionRestServlet(hs).register(http_server) DestinationRestServlet(hs).register(http_server) ListDestinationsRestServlet(hs).register(http_server) + RoomMessagesRestServlet(hs).register(http_server) + RoomTimestampToEventRestServlet(hs).register(http_server) # Some servlets only get registered for the main process. 
if hs.config.worker.worker_app is None: diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 3d870629c4..747e6fda83 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -35,6 +35,7 @@ from synapse.rest.admin._base import ( ) from synapse.storage.databases.main.room import RoomSortOrder from synapse.storage.state import StateFilter +from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, RoomID, UserID, create_requester from synapse.util import json_decoder @@ -858,3 +859,106 @@ class BlockRoomRestServlet(RestServlet): await self._store.unblock_room(room_id) return HTTPStatus.OK, {"block": block} + + +class RoomMessagesRestServlet(RestServlet): + """ + Get the list of messages in a room. + """ + + PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/messages$") + + def __init__(self, hs: "HomeServer"): + self._hs = hs + self._clock = hs.get_clock() + self._pagination_handler = hs.get_pagination_handler() + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + + async def on_GET( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + requester = await self._auth.get_user_by_req(request) + await assert_user_is_admin(self._auth, requester) + + pagination_config = await PaginationConfig.from_request( + self._store, request, default_limit=10 + ) + # Twisted will have processed the args by now. + assert request.args is not None + as_client_event = b"raw" not in request.args + filter_str = parse_string(request, "filter", encoding="utf-8") + if filter_str: + filter_json = urlparse.unquote(filter_str) + event_filter: Optional[Filter] = Filter( + self._hs, json_decoder.decode(filter_json) + ) + if ( + event_filter + and event_filter.filter_json.get("event_format", "client") + == "federation" + ): + as_client_event = False + else: + event_filter = None + + msgs = await self._pagination_handler.get_messages( + room_id=room_id, + requester=requester, + pagin_config=pagination_config, + as_client_event=as_client_event, + event_filter=event_filter, + use_admin_priviledge=True, + ) + + return HTTPStatus.OK, msgs + + +class RoomTimestampToEventRestServlet(RestServlet): + """ + API endpoint to fetch the `event_id` of the closest event to the given + timestamp (`ts` query parameter) in the given direction (`dir` query + parameter). + + Useful for cases like jump to date so you can start paginating messages from + a given date in the archive. + + `ts` is a timestamp in milliseconds where we will find the closest event in + the given direction. + + `dir` can be `f` or `b` to indicate forwards and backwards in time from the + given timestamp. + + GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event?ts=<ts>&dir=<direction> + { + "event_id": ...
+ } + """ + + PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/timestamp_to_event$") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + self._timestamp_lookup_handler = hs.get_timestamp_lookup_handler() + + async def on_GET( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + requester = await self._auth.get_user_by_req(request) + await assert_user_is_admin(self._auth, requester) + + timestamp = parse_integer(request, "ts", required=True) + direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) + + ( + event_id, + origin_server_ts, + ) = await self._timestamp_lookup_handler.get_event_for_timestamp( + requester, room_id, timestamp, direction + ) + + return HTTPStatus.OK, { + "event_id": event_id, + "origin_server_ts": origin_server_ts, + } diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 9d71a97524..d156be82b0 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import json +import time import urllib.parse from typing import List, Optional from unittest.mock import Mock @@ -22,10 +24,11 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import EventTypes, Membership, RoomTypes from synapse.api.errors import Codes -from synapse.handlers.pagination import PaginationHandler +from synapse.handlers.pagination import PaginationHandler, PurgeStatus from synapse.rest.client import directory, events, login, room from synapse.server import HomeServer from synapse.util import Clock +from synapse.util.stringutils import random_string from tests import unittest @@ -1793,6 +1796,159 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) +class RoomMessagesTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.user = self.register_user("foo", "pass") + self.user_tok = self.login("foo", "pass") + self.room_id = self.helper.create_room_as(self.user, tok=self.user_tok) + + def test_timestamp_to_event(self) -> None: + """Test that providing the current timestamp can get the last event.""" + self.helper.send(self.room_id, body="message 1", tok=self.user_tok) + second_event_id = self.helper.send( + self.room_id, body="message 2", tok=self.user_tok + )["event_id"] + ts = str(round(time.time() * 1000)) + + channel = self.make_request( + "GET", + "/_synapse/admin/v1/rooms/%s/timestamp_to_event?dir=b&ts=%s" + % (self.room_id, ts), + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) + self.assertIn("event_id", channel.json_body) + self.assertEqual(second_event_id, channel.json_body["event_id"]) + + def test_topo_token_is_accepted(self) -> None: + """Test Topo Token is accepted.""" + token = "t1-0_0_0_0_0_0_0_0_0" + channel = self.make_request( + "GET", + "/_synapse/admin/v1/rooms/%s/messages?from=%s" % (self.room_id, token), + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) +
self.assertIn("start", channel.json_body) + self.assertEqual(token, channel.json_body["start"]) + self.assertIn("chunk", channel.json_body) + self.assertIn("end", channel.json_body) + + def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None: + """Test that stream token is accepted for forward pagination.""" + token = "s0_0_0_0_0_0_0_0_0" + channel = self.make_request( + "GET", + "/_synapse/admin/v1/rooms/%s/messages?from=%s" % (self.room_id, token), + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) + self.assertIn("start", channel.json_body) + self.assertEqual(token, channel.json_body["start"]) + self.assertIn("chunk", channel.json_body) + self.assertIn("end", channel.json_body) + + def test_room_messages_purge(self) -> None: + """Test room messages can be retrieved by an admin that isn't in the room.""" + store = self.hs.get_datastores().main + pagination_handler = self.hs.get_pagination_handler() + + # Send a first message in the room, which will be removed by the purge. + first_event_id = self.helper.send( + self.room_id, body="message 1", tok=self.user_tok + )["event_id"] + first_token = self.get_success( + store.get_topological_token_for_event(first_event_id) + ) + first_token_str = self.get_success(first_token.to_string(store)) + + # Send a second message in the room, which won't be removed, and which we'll + # use as the marker to purge events before. + second_event_id = self.helper.send( + self.room_id, body="message 2", tok=self.user_tok + )["event_id"] + second_token = self.get_success( + store.get_topological_token_for_event(second_event_id) + ) + second_token_str = self.get_success(second_token.to_string(store)) + + # Send a third event in the room to ensure we don't fall under any edge case + # due to our marker being the latest forward extremity in the room. + self.helper.send(self.room_id, body="message 3", tok=self.user_tok) + + # Check that we get the first and second message when querying /messages. + channel = self.make_request( + "GET", + "/_synapse/admin/v1/rooms/%s/messages?from=%s&dir=b&filter=%s" + % ( + self.room_id, + second_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + chunk = channel.json_body["chunk"] + self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) + + # Purge every event before the second event. + purge_id = random_string(16) + pagination_handler._purges_by_id[purge_id] = PurgeStatus() + self.get_success( + pagination_handler._purge_history( + purge_id=purge_id, + room_id=self.room_id, + token=second_token_str, + delete_local_events=True, + ) + ) + + # Check that we only get the second message through /message now that the first + # has been purged. + channel = self.make_request( + "GET", + "/_synapse/admin/v1/rooms/%s/messages?from=%s&dir=b&filter=%s" + % ( + self.room_id, + second_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + chunk = channel.json_body["chunk"] + self.assertEqual(len(chunk), 1, [event["content"] for event in chunk]) + + # Check that we get no event, but also no error, when querying /messages with + # the token that was pointing at the first event, because we don't have it + # anymore. 
+ channel = self.make_request( + "GET", + "/_synapse/admin/v1/rooms/%s/messages?from=%s&dir=b&filter=%s" + % ( + self.room_id, + first_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + chunk = channel.json_body["chunk"] + self.assertEqual(len(chunk), 0, [event["content"] for event in chunk]) + + class JoinAliasRoomTestCase(unittest.HomeserverTestCase): servlets = [ -- cgit 1.4.1 From c2fe48a6ffb99f553f3eaecb8f15bcbedb58add0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 7 Sep 2022 10:08:20 +0000 Subject: Rename the `EventFormatVersions` enum values so that they line up with room version numbers. (#13706) --- changelog.d/13706.misc | 1 + synapse/api/room_versions.py | 45 ++++++++++++---------- synapse/event_auth.py | 4 +- synapse/events/__init__.py | 12 +++--- synapse/events/builder.py | 4 +- synapse/events/validator.py | 2 +- synapse/federation/federation_base.py | 2 +- synapse/federation/federation_client.py | 2 +- synapse/storage/databases/main/event_federation.py | 2 +- synapse/storage/databases/main/events_worker.py | 6 +-- tests/storage/databases/main/test_events_worker.py | 2 +- tests/storage/test_event_federation.py | 2 +- tests/test_event_auth.py | 4 +- 13 files changed, 47 insertions(+), 41 deletions(-) create mode 100644 changelog.d/13706.misc diff --git a/changelog.d/13706.misc b/changelog.d/13706.misc new file mode 100644 index 0000000000..65c854c7a9 --- /dev/null +++ b/changelog.d/13706.misc @@ -0,0 +1 @@ +Rename the `EventFormatVersions` enum values so that they line up with room version numbers. \ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index a0e4ab6db6..e37acb0f1e 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -19,18 +19,23 @@ import attr class EventFormatVersions: """This is an internal enum for tracking the version of the event format, - independently from the room version. + independently of the room version. + + To reduce confusion, the event format versions are named after the room + versions that they were used or introduced in. + The concept of an 'event format version' is specific to Synapse (the + specification does not mention this term.) 
""" - V1 = 1 # $id:server event id format - V2 = 2 # MSC1659-style $hash event id format: introduced for room v3 - V3 = 3 # MSC1884-style $hash format: introduced for room v4 + ROOM_V1_V2 = 1 # $id:server event id format: used for room v1 and v2 + ROOM_V3 = 2 # MSC1659-style $hash event id format: used for room v3 + ROOM_V4_PLUS = 3 # MSC1884-style $hash format: introduced for room v4 KNOWN_EVENT_FORMAT_VERSIONS = { - EventFormatVersions.V1, - EventFormatVersions.V2, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V1_V2, + EventFormatVersions.ROOM_V3, + EventFormatVersions.ROOM_V4_PLUS, } @@ -92,7 +97,7 @@ class RoomVersions: V1 = RoomVersion( "1", RoomDisposition.STABLE, - EventFormatVersions.V1, + EventFormatVersions.ROOM_V1_V2, StateResolutionVersions.V1, enforce_key_validity=False, special_case_aliases_auth=True, @@ -110,7 +115,7 @@ class RoomVersions: V2 = RoomVersion( "2", RoomDisposition.STABLE, - EventFormatVersions.V1, + EventFormatVersions.ROOM_V1_V2, StateResolutionVersions.V2, enforce_key_validity=False, special_case_aliases_auth=True, @@ -128,7 +133,7 @@ class RoomVersions: V3 = RoomVersion( "3", RoomDisposition.STABLE, - EventFormatVersions.V2, + EventFormatVersions.ROOM_V3, StateResolutionVersions.V2, enforce_key_validity=False, special_case_aliases_auth=True, @@ -146,7 +151,7 @@ class RoomVersions: V4 = RoomVersion( "4", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=False, special_case_aliases_auth=True, @@ -164,7 +169,7 @@ class RoomVersions: V5 = RoomVersion( "5", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=True, @@ -182,7 +187,7 @@ class RoomVersions: V6 = RoomVersion( "6", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -200,7 +205,7 @@ class RoomVersions: MSC2176 = RoomVersion( "org.matrix.msc2176", RoomDisposition.UNSTABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -218,7 +223,7 @@ class RoomVersions: V7 = RoomVersion( "7", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -236,7 +241,7 @@ class RoomVersions: V8 = RoomVersion( "8", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -254,7 +259,7 @@ class RoomVersions: V9 = RoomVersion( "9", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -272,7 +277,7 @@ class RoomVersions: MSC3787 = RoomVersion( "org.matrix.msc3787", RoomDisposition.UNSTABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -290,7 +295,7 @@ class RoomVersions: V10 = RoomVersion( "10", RoomDisposition.STABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, @@ -308,7 +313,7 @@ class RoomVersions: MSC2716v4 = RoomVersion( 
"org.matrix.msc2716v4", RoomDisposition.UNSTABLE, - EventFormatVersions.V3, + EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 389b0c5d53..c7d5ef92fc 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -109,7 +109,7 @@ def validate_event_for_room_version(event: "EventBase") -> None: if not is_invite_via_3pid: raise AuthError(403, "Event not signed by sender's server") - if event.format_version in (EventFormatVersions.V1,): + if event.format_version in (EventFormatVersions.ROOM_V1_V2,): # Only older room versions have event IDs to check. event_id_domain = get_domain_from_id(event.event_id) @@ -716,7 +716,7 @@ def check_redaction( if user_level >= redact_level: return False - if room_version_obj.event_format == EventFormatVersions.V1: + if room_version_obj.event_format == EventFormatVersions.ROOM_V1_V2: redacter_domain = get_domain_from_id(event.event_id) if not isinstance(event.redacts, str): return False diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 39ad2793d9..b2c9119fd0 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -442,7 +442,7 @@ class EventBase(metaclass=abc.ABCMeta): class FrozenEvent(EventBase): - format_version = EventFormatVersions.V1 # All events of this type are V1 + format_version = EventFormatVersions.ROOM_V1_V2 # All events of this type are V1 def __init__( self, @@ -490,7 +490,7 @@ class FrozenEvent(EventBase): class FrozenEventV2(EventBase): - format_version = EventFormatVersions.V2 # All events of this type are V2 + format_version = EventFormatVersions.ROOM_V3 # All events of this type are V2 def __init__( self, @@ -567,7 +567,7 @@ class FrozenEventV2(EventBase): class FrozenEventV3(FrozenEventV2): """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format""" - format_version = EventFormatVersions.V3 # All events of this type are V3 + format_version = EventFormatVersions.ROOM_V4_PLUS # All events of this type are V3 @property def event_id(self) -> str: @@ -597,11 +597,11 @@ def _event_type_from_format_version( `FrozenEvent` """ - if format_version == EventFormatVersions.V1: + if format_version == EventFormatVersions.ROOM_V1_V2: return FrozenEvent - elif format_version == EventFormatVersions.V2: + elif format_version == EventFormatVersions.ROOM_V3: return FrozenEventV2 - elif format_version == EventFormatVersions.V3: + elif format_version == EventFormatVersions.ROOM_V4_PLUS: return FrozenEventV3 else: raise Exception("No event format %r" % (format_version,)) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 17f624b68f..746bd3978d 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -137,7 +137,7 @@ class EventBuilder: # The types of auth/prev events changes between event versions. 
prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] - if format_version == EventFormatVersions.V1: + if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) prev_events = await self._store.add_event_hashes(prev_event_ids) else: @@ -253,7 +253,7 @@ def create_local_event_from_event_dict( time_now = int(clock.time_msec()) - if format_version == EventFormatVersions.V1: + if format_version == EventFormatVersions.ROOM_V1_V2: event_dict["event_id"] = _create_event_id(clock, hostname) event_dict["origin"] = hostname diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 27c8beba25..a6f0104396 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -45,7 +45,7 @@ class EventValidator: """ self.validate_builder(event) - if event.format_version == EventFormatVersions.V1: + if event.format_version == EventFormatVersions.ROOM_V1_V2: EventID.from_string(event.event_id) required = [ diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 4269a98db2..abe2c1971a 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -194,7 +194,7 @@ async def _check_sigs_on_pdu( # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). - if room_version.event_format == EventFormatVersions.V1: + if room_version.event_format == EventFormatVersions.ROOM_V1_V2: event_domain = get_domain_from_id(pdu.event_id) if event_domain != sender_domain: try: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7ee2974bb1..4a4289ee7c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1190,7 +1190,7 @@ class FederationClient(FederationBase): # Otherwise, consider it a legitimate error and raise. err = e.to_synapse_error() if self._is_unknown_endpoint(e, err): - if room_version.event_format != EventFormatVersions.V1: + if room_version.event_format != EventFormatVersions.ROOM_V1_V2: raise SynapseError( 400, "User's homeserver does not support this room version", diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index c836078da6..e687f87eca 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1606,7 +1606,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas logger.info("Invalid prev_events for %s", event_id) continue - if room_version.event_format == EventFormatVersions.V1: + if room_version.event_format == EventFormatVersions.ROOM_V1_V2: for prev_event_tuple in prev_events: if ( not isinstance(prev_event_tuple, list) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 9b997c304d..84f17a9945 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1156,7 +1156,7 @@ class EventsWorkerStore(SQLBaseStore): if format_version is None: # This means that we stored the event before we had the concept # of a event format version, so it must be a V1 event. 
- format_version = EventFormatVersions.V1 + format_version = EventFormatVersions.ROOM_V1_V2 room_version_id = row.room_version_id @@ -1186,10 +1186,10 @@ class EventsWorkerStore(SQLBaseStore): # # So, the following approximations should be adequate. - if format_version == EventFormatVersions.V1: + if format_version == EventFormatVersions.ROOM_V1_V2: # if it's event format v1 then it must be room v1 or v2 room_version = RoomVersions.V1 - elif format_version == EventFormatVersions.V2: + elif format_version == EventFormatVersions.ROOM_V3: # if it's event format v2 then it must be room v3 room_version = RoomVersions.V3 else: diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 46d829b062..67401272ac 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -254,7 +254,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): "room_id": self.room_id, "json": json.dumps(event_json), "internal_metadata": "{}", - "format_version": EventFormatVersions.V3, + "format_version": EventFormatVersions.ROOM_V4_PLUS, }, ) ) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index d92a9ac5b7..a6679e1312 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -513,7 +513,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]: """Account for differences in prev_events format across room versions""" - if room_version.event_format == EventFormatVersions.V1: + if room_version.event_format == EventFormatVersions.ROOM_V1_V2: return prev_event_id, {} return prev_event_id diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e42d7b9ba0..f4d9fba0a1 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -821,7 +821,7 @@ def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: def _build_auth_dict_for_room_version( room_version: RoomVersion, auth_events: Iterable[EventBase] ) -> List: - if room_version.event_format == EventFormatVersions.V1: + if room_version.event_format == EventFormatVersions.ROOM_V1_V2: return [(e.event_id, "not_used") for e in auth_events] else: return [e.event_id for e in auth_events] @@ -871,7 +871,7 @@ event_count = 0 def _maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict: """If this room version needs it, generate an event id""" - if room_version.event_format != EventFormatVersions.V1: + if room_version.event_format != EventFormatVersions.ROOM_V1_V2: return {} global event_count -- cgit 1.4.1 From d3d9ca156e323fe194b1bcb1af1628f65a2f3c1c Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 7 Sep 2022 11:03:32 +0000 Subject: Cancel the processing of key query requests when they time out. 
(#13680) --- changelog.d/13680.feature | 1 + synapse/api/auth.py | 5 +++ synapse/handlers/device.py | 3 ++ synapse/handlers/e2e_keys.py | 40 +++++++++++++--------- synapse/rest/client/keys.py | 6 ++-- synapse/storage/controllers/state.py | 4 +++ synapse/storage/databases/main/devices.py | 4 +++ synapse/storage/databases/main/end_to_end_keys.py | 5 ++- synapse/storage/databases/main/event_federation.py | 2 ++ synapse/storage/databases/main/events_worker.py | 4 +++ synapse/storage/databases/main/roommember.py | 2 ++ synapse/storage/databases/main/state.py | 2 ++ synapse/storage/databases/main/stream.py | 2 ++ synapse/storage/databases/state/store.py | 3 ++ .../storage/util/partial_state_events_tracker.py | 3 ++ synapse/types.py | 5 +++ tests/http/server/_base.py | 10 +++++- tests/rest/client/test_keys.py | 29 ++++++++++++++++ 18 files changed, 110 insertions(+), 20 deletions(-) create mode 100644 changelog.d/13680.feature diff --git a/changelog.d/13680.feature b/changelog.d/13680.feature new file mode 100644 index 0000000000..4234c7e082 --- /dev/null +++ b/changelog.d/13680.feature @@ -0,0 +1 @@ +Cancel the processing of key query requests when they time out. \ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 9a1aea083f..8e54ef84b2 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -38,6 +38,7 @@ from synapse.logging.opentracing import ( trace, ) from synapse.types import Requester, create_requester +from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer @@ -118,6 +119,7 @@ class Auth: errcode=Codes.NOT_JOINED, ) + @cancellable async def get_user_by_req( self, request: SynapseRequest, @@ -166,6 +168,7 @@ class Auth: parent_span.set_tag("appservice_id", requester.app_service.id) return requester + @cancellable async def _wrapped_get_user_by_req( self, request: SynapseRequest, @@ -281,6 +284,7 @@ class Auth: 403, "Application service has not registered this user (%s)" % user_id ) + @cancellable async def _get_appservice_user(self, request: Request) -> Optional[Requester]: """ Given a request, reads the request parameters to determine: @@ -523,6 +527,7 @@ class Auth: return bool(query_params) or bool(auth_headers) @staticmethod + @cancellable def get_access_token_from_request(request: Request) -> str: """Extracts the access_token from the request. 
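
For context: the `@cancellable` annotations added throughout this commit are pure markers and do no work by themselves. A minimal sketch of such a marker decorator, consistent with how it is applied in these hunks (the real helper lives in `synapse/util/cancellation.py` and may differ in detail):

```python
from typing import Any, Awaitable, Callable, TypeVar

F = TypeVar("F", bound=Callable[..., Awaitable[Any]])


def cancellable(function: F) -> F:
    """Mark an async request handler as safe to cancel mid-flight.

    The flag is advisory: the HTTP layer can check
    `getattr(handler, "cancellable", False)` when a client disconnects and
    cancel the request's Deferred only for handlers that opted in, since
    cancelling arbitrary code at an await point can corrupt shared caches
    and locks.
    """
    function.cancellable = True  # type: ignore[attr-defined]
    return function
```
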
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9c2c3a0e68..c5ac169644 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -52,6 +52,7 @@ from synapse.types import ( from synapse.util import stringutils from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.cancellation import cancellable from synapse.util.metrics import measure_func from synapse.util.retryutils import NotRetryingDestination @@ -124,6 +125,7 @@ class DeviceWorkerHandler: return device + @cancellable async def get_device_changes_in_shared_rooms( self, user_id: str, room_ids: Collection[str], from_token: StreamToken ) -> Collection[str]: @@ -163,6 +165,7 @@ class DeviceWorkerHandler: @trace @measure_func("device.get_user_ids_changed") + @cancellable async def get_user_ids_changed( self, user_id: str, from_token: StreamToken ) -> JsonDict: diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index c938339ddd..ec81639c78 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -37,7 +37,8 @@ from synapse.types import ( get_verify_key_from_cross_signing_key, ) from synapse.util import json_decoder, unwrapFirstError -from synapse.util.async_helpers import Linearizer +from synapse.util.async_helpers import Linearizer, delay_cancellation +from synapse.util.cancellation import cancellable from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -91,6 +92,7 @@ class E2eKeysHandler: ) @trace + @cancellable async def query_devices( self, query_body: JsonDict, @@ -208,22 +210,26 @@ class E2eKeysHandler: r[user_id] = remote_queries[user_id] # Now fetch any devices that we don't have in our cache + # TODO It might make sense to propagate cancellations into the + # deferreds which are querying remote homeservers. 
await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self._query_devices_for_destination, - results, - cross_signing_keys, - failures, - destination, - queries, - timeout, - ) - for destination, queries in remote_queries_not_in_cache.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) + delay_cancellation( + defer.gatherResults( + [ + run_in_background( + self._query_devices_for_destination, + results, + cross_signing_keys, + failures, + destination, + queries, + timeout, + ) + for destination, queries in remote_queries_not_in_cache.items() + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) ) ret = {"device_keys": results, "failures": failures} @@ -347,6 +353,7 @@ class E2eKeysHandler: return + @cancellable async def get_cross_signing_keys_from_cache( self, query: Iterable[str], from_user_id: Optional[str] ) -> Dict[str, Dict[str, dict]]: @@ -393,6 +400,7 @@ class E2eKeysHandler: } @trace + @cancellable async def query_local_devices( self, query: Mapping[str, Optional[List[str]]] ) -> Dict[str, Dict[str, dict]]: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index a395694fa5..f653d2a3e1 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -27,9 +27,9 @@ from synapse.http.servlet import ( ) from synapse.http.site import SynapseRequest from synapse.logging.opentracing import log_kv, set_tag +from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.types import JsonDict, StreamToken - -from ._base import client_patterns, interactive_auth_handler +from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer @@ -156,6 +156,7 @@ class KeyQueryServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() + @cancellable async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() @@ -199,6 +200,7 @@ class KeyChangesServlet(RestServlet): self.device_handler = hs.get_device_handler() self.store = hs.get_datastores().main + @cancellable async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index ba5380ce3e..bbe568bf05 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -36,6 +36,7 @@ from synapse.storage.util.partial_state_events_tracker import ( PartialStateEventsTracker, ) from synapse.types import MutableStateMap, StateMap +from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer @@ -229,6 +230,7 @@ class StateStorageController: @trace @tag_args + @cancellable async def get_state_ids_for_events( self, event_ids: Collection[str], @@ -350,6 +352,7 @@ class StateStorageController: @trace @tag_args + @cancellable async def get_state_group_for_events( self, event_ids: Collection[str], @@ -398,6 +401,7 @@ class StateStorageController: event_id, room_id, prev_group, delta_ids, current_state_ids ) + @cancellable async def get_current_state_ids( self, room_id: str, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index ca0fe8c4be..5d700ca6c3 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -53,6 
+53,7 @@ from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import LruCache from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -668,6 +669,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): ... @trace + @cancellable async def get_user_devices_from_cache( self, query_list: List[Tuple[str, Optional[str]]] ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]: @@ -743,6 +745,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): return self._device_list_stream_cache.get_all_entities_changed(from_key) + @cancellable async def get_users_whose_devices_changed( self, from_key: int, @@ -1221,6 +1224,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): desc="get_min_device_lists_changes_in_room", ) + @cancellable async def get_device_list_changes_in_rooms( self, room_ids: Collection[str], from_id: int ) -> Optional[Set[str]]: diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 46c0d06157..8e9e1b0b4b 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -50,6 +50,7 @@ from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -135,6 +136,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker return now_stream_id, [] @trace + @cancellable async def get_e2e_device_keys_for_cs_api( self, query_list: List[Tuple[str, Optional[str]]] ) -> Dict[str, Dict[str, JsonDict]]: @@ -197,6 +199,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ... @trace + @cancellable async def get_e2e_device_keys_and_signatures( self, query_list: Collection[Tuple[str, Optional[str]]], @@ -887,6 +890,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker return keys + @cancellable async def get_e2e_cross_signing_keys_bulk( self, user_ids: List[str], from_user_id: Optional[str] = None ) -> Dict[str, Optional[Dict[str, JsonDict]]]: @@ -902,7 +906,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker keys were not found, either their user ID will not be in the dict, or their user ID will map to None. 
""" - result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids) if from_user_id: diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index e687f87eca..ca47a22bf1 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -48,6 +48,7 @@ from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache +from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -976,6 +977,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas return int(min_depth) if min_depth is not None else None + @cancellable async def get_forward_extremities_for_room_at_stream_ordering( self, room_id: str, stream_ordering: int ) -> List[str]: diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 84f17a9945..52914febf9 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -81,6 +81,7 @@ from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred, delay_cancellation from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import AsyncLruCache +from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -339,6 +340,7 @@ class EventsWorkerStore(SQLBaseStore): ) -> Optional[EventBase]: ... + @cancellable async def get_event( self, event_id: str, @@ -433,6 +435,7 @@ class EventsWorkerStore(SQLBaseStore): @trace @tag_args + @cancellable async def get_events_as_list( self, event_ids: Collection[str], @@ -584,6 +587,7 @@ class EventsWorkerStore(SQLBaseStore): return events + @cancellable async def _get_events_from_cache_or_db( self, event_ids: Iterable[str], allow_rejected: bool = False ) -> Dict[str, EventCacheEntry]: diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 4f0adb136a..a77e49dc66 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -55,6 +55,7 @@ from synapse.types import JsonDict, PersistedEventPosition, StateMap, get_domain from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList +from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -770,6 +771,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): _get_users_server_still_shares_room_with_txn, ) + @cancellable async def get_rooms_for_user( self, user_id: str, on_invalidate: Optional[Callable[[], None]] = None ) -> FrozenSet[str]: diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 0b10af0e58..e607ccfdc9 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -36,6 +36,7 @@ from synapse.storage.state import StateFilter from synapse.types import JsonDict, JsonMapping, StateMap from synapse.util.caches import intern_string from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.cancellation import cancellable from 
synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -281,6 +282,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # FIXME: how should this be cached? + @cancellable async def get_partial_filtered_current_state_ids( self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index a347430aa7..3f9bfaeac5 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -72,6 +72,7 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import PersistedEventPosition, RoomStreamToken from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer @@ -597,6 +598,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return ret, key + @cancellable async def get_membership_changes_for_user( self, user_id: str, diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index bb64543c1f..f8cfcaca83 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -31,6 +31,7 @@ from synapse.storage.util.sequence import build_sequence_generator from synapse.types import MutableStateMap, StateKey, StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache +from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer @@ -156,6 +157,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): "get_state_group_delta", _get_state_group_delta_txn ) + @cancellable async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ) -> Dict[int, StateMap[str]]: @@ -235,6 +237,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state_filter.filter_state(state_dict_ids), not missing_types + @cancellable async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Dict[int, MutableStateMap[str]]: diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py index b4bf49dace..8d8894d1d5 100644 --- a/synapse/storage/util/partial_state_events_tracker.py +++ b/synapse/storage/util/partial_state_events_tracker.py @@ -24,6 +24,7 @@ from synapse.logging.opentracing import trace_with_opname from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore from synapse.util import unwrapFirstError +from synapse.util.cancellation import cancellable logger = logging.getLogger(__name__) @@ -60,6 +61,7 @@ class PartialStateEventsTracker: o.callback(None) @trace_with_opname("PartialStateEventsTracker.await_full_state") + @cancellable async def await_full_state(self, event_ids: Collection[str]) -> None: """Wait for all the given events to have full state. @@ -154,6 +156,7 @@ class PartialCurrentStateTracker: o.callback(None) @trace_with_opname("PartialCurrentStateTracker.await_full_state") + @cancellable async def await_full_state(self, room_id: str) -> None: # We add the deferred immediately so that the DB call to check for # partial state doesn't race when we unpartial the room. 
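
The reason cancellation is opted into method by method is Twisted's propagation model: cancelling a Deferred chain injects a `CancelledError` at whatever `await` is currently pending. The standalone sketch below (illustrative only, not Synapse code) demonstrates that mechanism; it is also what `delay_cancellation` — used around the remote key queries in `e2e_keys.py` above — keeps away from work shared between requests:

```python
from twisted.internet import reactor
from twisted.internet.defer import CancelledError, Deferred, ensureDeferred


async def fetch_keys() -> str:
    # Stand-in for a slow database or federation query.
    d: "Deferred[str]" = Deferred()
    reactor.callLater(1.0, d.callback, "device keys")
    return await d


def on_cancelled(failure):
    # Cancelling the outer Deferred raised CancelledError at `await d`.
    failure.trap(CancelledError)
    print("request cancelled; the in-flight result will be discarded")


request = ensureDeferred(fetch_keys())
request.addErrback(on_cancelled)
request.cancel()  # simulate the client disconnecting mid-request

reactor.callLater(1.5, reactor.stop)
reactor.run()
```
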
diff --git a/synapse/types.py b/synapse/types.py index 668d48d646..ec44601f54 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -52,6 +52,7 @@ from twisted.internet.interfaces import ( ) from synapse.api.errors import Codes, SynapseError +from synapse.util.cancellation import cancellable from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: @@ -699,7 +700,11 @@ class StreamToken: START: ClassVar["StreamToken"] @classmethod + @cancellable async def from_string(cls, store: "DataStore", string: str) -> "StreamToken": + """ + Creates a StreamToken from its textual representation. + """ try: keys = string.split(cls._SEPARATOR) while len(keys) < len(attr.fields(cls)): diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 5726e60cee..5071f83574 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -140,6 +140,8 @@ def make_request_with_cancellation_test( method: str, path: str, content: Union[bytes, str, JsonDict] = b"", + *, + token: Optional[str] = None, ) -> FakeChannel: """Performs a request repeatedly, disconnecting at successive `await`s, until one completes. @@ -211,7 +213,13 @@ def make_request_with_cancellation_test( with deferred_patch.patch(): # Start the request. channel = make_request( - reactor, site, method, path, content, await_result=False + reactor, + site, + method, + path, + content, + await_result=False, + access_token=token, ) request = channel.request diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index bbc8e74243..741fecea77 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -19,6 +19,7 @@ from synapse.rest import admin from synapse.rest.client import keys, login from tests import unittest +from tests.http.server._base import make_request_with_cancellation_test class KeyQueryTestCase(unittest.HomeserverTestCase): @@ -89,3 +90,31 @@ class KeyQueryTestCase(unittest.HomeserverTestCase): Codes.BAD_JSON, channel.result, ) + + def test_key_query_cancellation(self) -> None: + """ + Tests that /keys/query is cancellable and does not swallow the + CancelledError. 
+ """ + self.register_user("alice", "wonderland") + alice_token = self.login("alice", "wonderland") + + bob = self.register_user("bob", "uncle") + + channel = make_request_with_cancellation_test( + "test_key_query_cancellation", + self.reactor, + self.site, + "POST", + "/_matrix/client/r0/keys/query", + { + "device_keys": { + # Empty list means we request keys for all bob's devices + bob: [], + }, + }, + token=alice_token, + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertIn(bob, channel.json_body["device_keys"]) -- cgit 1.4.1 From b58386e37e30e920332e4b04011b528a66a39fad Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 7 Sep 2022 12:16:10 +0100 Subject: A second batch of Pydantic models for rest/client/account.py (#13687) --- changelog.d/13687.feature | 1 + synapse/http/servlet.py | 19 +++++++++++++-- synapse/rest/client/account.py | 54 ++++++++++++++++++++---------------------- synapse/rest/client/models.py | 24 +++++++++++++++---- 4 files changed, 64 insertions(+), 34 deletions(-) create mode 100644 changelog.d/13687.feature diff --git a/changelog.d/13687.feature b/changelog.d/13687.feature new file mode 100644 index 0000000000..dac53ec122 --- /dev/null +++ b/changelog.d/13687.feature @@ -0,0 +1 @@ +Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status). \ No newline at end of file diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 26aaabfb34..80acbdcf3c 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -28,7 +28,8 @@ from typing import ( overload, ) -from pydantic import BaseModel, ValidationError +from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError +from pydantic.error_wrappers import ErrorWrapper from typing_extensions import Literal from twisted.web.server import Request @@ -714,7 +715,21 @@ def parse_and_validate_json_object_from_request( try: instance = model_type.parse_obj(content) except ValidationError as e: - raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=Codes.BAD_JSON) + # Choose a matrix error code. The catch-all is BAD_JSON, but we try to find a + # more specific error if possible (which occasionally helps us to be spec- + # compliant). This is a bit awkward because the spec's error codes aren't very + # clear-cut: BAD_JSON arguably overlaps with MISSING_PARAM and INVALID_PARAM. + errcode = Codes.BAD_JSON + + raw_errors = e.raw_errors + if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper): + raw_error = raw_errors[0].exc + if isinstance(raw_error, MissingError): + errcode = Codes.MISSING_PARAM + elif isinstance(raw_error, PydanticValueError): + errcode = Codes.INVALID_PARAM + + raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=errcode) return instance diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 1f9a8ccc23..a09aaf3448 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -15,7 +15,7 @@ # limitations under the License. 
import logging import random -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple from urllib.parse import urlparse from pydantic import StrictBool, StrictStr, constr @@ -41,7 +41,11 @@ from synapse.http.servlet import ( from synapse.http.site import SynapseRequest from synapse.metrics import threepid_send_requests from synapse.push.mailer import Mailer -from synapse.rest.client.models import AuthenticationData, EmailRequestTokenBody +from synapse.rest.client.models import ( + AuthenticationData, + EmailRequestTokenBody, + MsisdnRequestTokenBody, +) from synapse.rest.models import RequestBodyModel from synapse.types import JsonDict from synapse.util.msisdn import phone_number_to_msisdn @@ -400,23 +404,16 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - body = parse_json_object_from_request(request) - assert_params_in_dict( - body, ["client_secret", "country", "phone_number", "send_attempt"] + body = parse_and_validate_json_object_from_request( + request, MsisdnRequestTokenBody ) - client_secret = body["client_secret"] - assert_valid_client_secret(client_secret) - - country = body["country"] - phone_number = body["phone_number"] - send_attempt = body["send_attempt"] - next_link = body.get("next_link") # Optional param - - msisdn = phone_number_to_msisdn(country, phone_number) + msisdn = phone_number_to_msisdn(body.country, body.phone_number) if not await check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( 403, + # TODO: is this error message accurate? Looks like we've only rejected + # this phone number, not necessarily all phone numbers "Account phone numbers are not authorized on this server", Codes.THREEPID_DENIED, ) @@ -425,9 +422,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): request, "msisdn", msisdn ) - if next_link: + if body.next_link: # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + assert_valid_next_link(self.hs, body.next_link) existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) @@ -454,15 +451,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): ret = await self.identity_handler.requestMsisdnToken( self.hs.config.registration.account_threepid_delegate_msisdn, - country, - phone_number, - client_secret, - send_attempt, - next_link, + body.country, + body.phone_number, + body.client_secret, + body.send_attempt, + body.next_link, ) threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe( - send_attempt + body.send_attempt ) return 200, ret @@ -845,17 +842,18 @@ class AccountStatusRestServlet(RestServlet): self._auth = hs.get_auth() self._account_handler = hs.get_account_handler() + class PostBody(RequestBodyModel): + # TODO: we could validate that each user id is an mxid here, and/or parse it + # as a UserID + user_ids: List[StrictStr] + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await self._auth.get_user_by_req(request) - body = parse_json_object_from_request(request) - if "user_ids" not in body: - raise SynapseError( - 400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM - ) + body = parse_and_validate_json_object_from_request(request, self.PostBody) statuses, failures = await self._account_handler.get_account_statuses( - body["user_ids"], + body.user_ids, allow_remote=True, ) diff --git 
a/synapse/rest/client/models.py b/synapse/rest/client/models.py index 3150602997..6278450c70 100644 --- a/synapse/rest/client/models.py +++ b/synapse/rest/client/models.py @@ -25,8 +25,8 @@ class AuthenticationData(RequestBodyModel): (The name "Authentication Data" is taken directly from the spec.) - Additional keys will be present, depending on the `type` field. Use `.dict()` to - access them. + Additional keys will be present, depending on the `type` field. Use + `.dict(exclude_unset=True)` to access them. """ class Config: @@ -36,7 +36,7 @@ class AuthenticationData(RequestBodyModel): type: Optional[StrictStr] = None -class EmailRequestTokenBody(RequestBodyModel): +class ThreePidRequestTokenBody(RequestBodyModel): if TYPE_CHECKING: client_secret: StrictStr else: @@ -47,7 +47,7 @@ class EmailRequestTokenBody(RequestBodyModel): max_length=255, strict=True, ) - email: StrictStr + id_server: Optional[StrictStr] id_access_token: Optional[StrictStr] next_link: Optional[StrictStr] @@ -61,9 +61,25 @@ class EmailRequestTokenBody(RequestBodyModel): raise ValueError("id_access_token is required if an id_server is supplied.") return token + +class EmailRequestTokenBody(ThreePidRequestTokenBody): + email: StrictStr + # Canonicalise the email address. The addresses are all stored canonicalised # in the database. This allows the user to reset their password without having to # know the exact spelling (eg. upper and lower case) of the address in the database. # Without this, an email stored in the database as "foo@bar.com" would cause # user requests for "FOO@bar.com" to raise a Not Found error. _email_validator = validator("email", allow_reuse=True)(validate_email) + + +if TYPE_CHECKING: + ISO3116_1_Alpha_2 = StrictStr +else: + # Per spec: two-letter uppercase ISO-3166-1-alpha-2 + ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True) + + +class MsisdnRequestTokenBody(ThreePidRequestTokenBody): + country: ISO3116_1_Alpha_2 + phone_number: StrictStr -- cgit 1.4.1 From 77f39864511d0c0217d41691cc5395fde9e0bced Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 7 Sep 2022 13:07:42 +0100 Subject: Define SQLite compat policy (#13728) --- changelog.d/13728.doc | 1 + docs/deprecation_policy.md | 15 +++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13728.doc diff --git a/changelog.d/13728.doc b/changelog.d/13728.doc new file mode 100644 index 0000000000..75ca7b7ec3 --- /dev/null +++ b/changelog.d/13728.doc @@ -0,0 +1 @@ +Define Synapse's compatibility policy for SQLite versions. diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md index b8a46e3d60..46c18d7d32 100644 --- a/docs/deprecation_policy.md +++ b/docs/deprecation_policy.md @@ -1,9 +1,9 @@ Deprecation Policy for Platform Dependencies ============================================ -Synapse has a number of platform dependencies, including Python and PostgreSQL. -This document outlines the policy towards which versions we support, and when we -drop support for versions in the future. +Synapse has a number of platform dependencies, including Python, Rust, +PostgreSQL and SQLite. This document outlines the policy towards which versions +we support, and when we drop support for versions in the future. Policy @@ -17,12 +17,14 @@ Details on the upstream support life cycles for Python and PostgreSQL are documented at [https://endoflife.date/python](https://endoflife.date/python) and [https://endoflife.date/postgresql](https://endoflife.date/postgresql). 
- A Rust compiler is required to build Synapse from source. For any given release the minimum required version may be bumped up to a recent Rust version, and so people building from source should ensure they can fetch recent versions of Rust (e.g. by using [rustup](https://rustup.rs/)). +The oldest supported version of SQLite is the version +[provided](https://packages.debian.org/buster/libsqlite3-0) by +[Debian oldstable](https://wiki.debian.org/DebianOldStable). Context ------- @@ -44,3 +46,8 @@ generally bump their minimum supported Rust versions frequently. In general, the Synapse team will try to avoid updating the dependency on Rust to the absolute latest version, but introducing a formal policy is hard given the constraints of the ecosystem. + +On a similar note, SQLite does not generally have a concept of "supported +release"; bugfixes are published for the latest minor release only. We chose to +track Debian's oldstable as this is relatively conservative, predictably updated +and is consistent with the `.deb` packages released by Matrix.org. \ No newline at end of file -- cgit 1.4.1 From c46fecd1f29b1d2b8756ab2dc5979c24822bfb93 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 7 Sep 2022 14:46:11 +0100 Subject: Correct out-of-date doc for `event_cache_size` (#13726) --- changelog.d/13726.doc | 1 + docs/usage/configuration/config_documentation.md | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13726.doc diff --git a/changelog.d/13726.doc b/changelog.d/13726.doc new file mode 100644 index 0000000000..ab840e1a92 --- /dev/null +++ b/changelog.d/13726.doc @@ -0,0 +1 @@ +Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 757957a1d5..ae490d13a4 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1069,8 +1069,10 @@ Options related to caching. --- ### `event_cache_size` -The number of events to cache in memory. Not affected by -`caches.global_factor` and is not part of the `caches` section. Defaults to 10K. +The number of events to cache in memory. Defaults to 10K. Like other caches, +this is affected by `caches.global_factor` (see below). + +Note that this option is not part of the `caches` section. 
Example configuration: ```yaml -- cgit 1.4.1 From dc0e896b68beb7518fc81f897ac1006bf9f1858c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Sep 2022 14:56:59 +0100 Subject: Add some rust caching to CI (#13735) --- .github/workflows/tests.yml | 5 +++++ changelog.d/13735.misc | 1 + 2 files changed, 6 insertions(+) create mode 100644 changelog.d/13735.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5f96bdfa7f..50dc8e30d4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -144,6 +144,7 @@ jobs: with: toolchain: 1.61.0 override: true + - uses: Swatinem/rust-cache@v2 # There aren't wheels for some of the older deps, so we need to install # their build dependencies @@ -253,11 +254,14 @@ jobs: - uses: actions/checkout@v2 - name: Prepare test blacklist run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers + - name: Install Rust uses: actions-rs/toolchain@v1 with: toolchain: 1.61.0 override: true + - uses: Swatinem/rust-cache@v2 + - name: Run SyTest run: /bootstrap.sh synapse working-directory: /src @@ -369,6 +373,7 @@ jobs: with: toolchain: 1.61.0 override: true + - uses: Swatinem/rust-cache@v2 - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh diff --git a/changelog.d/13735.misc b/changelog.d/13735.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/13735.misc @@ -0,0 +1 @@ +Add a stub Rust crate. -- cgit 1.4.1 From 8d7fcf9b761941d73af4f150e1173cafe86714a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Sep 2022 15:07:06 +0100 Subject: Fix latest deps CI (#13734) --- .github/workflows/latest_deps.yml | 20 +++++++++++++++++--- .github/workflows/twisted_trunk.yml | 24 ++++++++++++++++++++++++ changelog.d/13734.misc | 1 + 3 files changed, 42 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13734.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 7dac617c4b..07229e56bd 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -5,7 +5,7 @@ # # As an overview this workflow: # - checks out develop, -# - installs from source, pulling in the dependencies like a fresh `pip install` would, and +# - installs from source, pulling in the dependencies like a fresh `pip install` would, and # - runs mypy and test suites in that checkout. # # Based on the twisted trunk CI job. @@ -26,12 +26,19 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v2 + # The dev dependencies aren't exposed in the wheel metadata (at least with current # poetry-core versions), so we install with poetry. - uses: matrix-org/setup-python-poetry@v1 with: python-version: "3.x" - poetry-version: "1.2.0b1" + poetry-version: "1.2.0" extras: "all" # Dump installed versions for debugging. 
- run: poetry run pip list > before.txt @@ -53,6 +60,14 @@ jobs: steps: - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v2 + - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.postgres-version }} if: ${{ matrix.postgres-version }} @@ -187,4 +202,3 @@ jobs: with: update_existing: true filename: .ci/latest_deps_build_failed_issue_template.md - diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 0906101cc1..8fa2fbdea0 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -16,6 +16,14 @@ jobs: steps: - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v2 + - uses: matrix-org/setup-python-poetry@v1 with: python-version: "3.x" @@ -34,6 +42,14 @@ jobs: steps: - uses: actions/checkout@v2 - run: sudo apt-get -qq install xmlsec1 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v2 + - uses: matrix-org/setup-python-poetry@v1 with: python-version: "3.x" @@ -66,6 +82,14 @@ jobs: steps: - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v2 + - name: Patch dependencies # Note: The poetry commands want to create a virtualenv in /src/.venv/, # but the sytest-synapse container expects it to be in /venv/. diff --git a/changelog.d/13734.misc b/changelog.d/13734.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/13734.misc @@ -0,0 +1 @@ +Add a stub Rust crate. -- cgit 1.4.1 From d4d3249ded000219a4f875943632c3d0f928d58d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 7 Sep 2022 11:41:52 -0500 Subject: Instrument `get_metadata_for_events` for tracing (#13730) When backfilling, `_get_state_ids_after_missing_prev_event` calls [`get_metadata_for_events`](https://github.com/matrix-org/synapse/blob/26bc26586b4b95d63ce7e453e9312469843f796e/synapse/handlers/federation_event.py#L1133). For `#matrix:matrix.org`, it's called with 77k `state_events` which means 77 calls to the database and takes 28 seconds. --- changelog.d/13730.misc | 1 + synapse/storage/databases/main/state.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/13730.misc diff --git a/changelog.d/13730.misc b/changelog.d/13730.misc new file mode 100644 index 0000000000..06da6581a4 --- /dev/null +++ b/changelog.d/13730.misc @@ -0,0 +1 @@ +Instrument `get_metadata_for_events` for understandable traces in Jaeger. 
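
For context: the `@trace` decorator applied below records each call to the decorated function as a span in the active trace, which is what makes these batched database calls visible in Jaeger. Ignoring Synapse's logcontext integration, the core of such a decorator built on the `opentracing` package might look like the following sketch (the real decorator in `synapse/logging/opentracing.py` also handles tracing being disabled, and scopes that must survive across await points, which this sketch glosses over):

```python
import functools
from typing import Any, Awaitable, Callable, TypeVar

import opentracing

F = TypeVar("F", bound=Callable[..., Awaitable[Any]])


def trace(func: F) -> F:
    """Report each call to the wrapped async function as a span named
    after the function, e.g. `get_metadata_for_events`."""

    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        scope = opentracing.tracer.start_active_span(func.__name__)
        try:
            return await func(*args, **kwargs)
        finally:
            scope.close()

    return wrapper  # type: ignore[return-value]
```
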
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index e607ccfdc9..af7bebee80 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -23,6 +23,7 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -143,6 +144,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return room_version + @trace async def get_metadata_for_events( self, event_ids: Collection[str] ) -> Dict[str, EventMetadata]: -- cgit 1.4.1 From b7e4bfd005a804ed0cd3d204c367b11565b76df6 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 8 Sep 2022 10:18:03 +0000 Subject: Fix a bug where Synapse fails to start if a signing key file contains an empty line. (#13738) --- changelog.d/13738.bugfix | 1 + synapse/config/key.py | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13738.bugfix diff --git a/changelog.d/13738.bugfix b/changelog.d/13738.bugfix new file mode 100644 index 0000000000..d64fa0b4de --- /dev/null +++ b/changelog.d/13738.bugfix @@ -0,0 +1 @@ +Fix a bug where Synapse fails to start if a signing key file contains an empty line. \ No newline at end of file diff --git a/synapse/config/key.py b/synapse/config/key.py index cc75efdf8f..f3dc4df695 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -217,7 +217,18 @@ class KeyConfig(Config): signing_keys = self.read_file(signing_key_path, name) try: - return read_signing_keys(signing_keys.splitlines(True)) + loaded_signing_keys = read_signing_keys( + [ + signing_key_line + for signing_key_line in signing_keys.splitlines(keepends=False) + if signing_key_line.strip() + ] + ) + + if not loaded_signing_keys: + raise ConfigError(f"No signing keys in file {signing_key_path}") + + return loaded_signing_keys except Exception as e: raise ConfigError("Error reading %s: %s" % (name, str(e))) -- cgit 1.4.1 From a7c71686ca6c6b8e2dd3ba005669a4707e56869f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 8 Sep 2022 12:00:03 +0100 Subject: Add minimum version bump for sqlite to the release notes (#13742) * Notify that SQLite min version will be bumped * Mention in upgrade notes Co-authored-by: reivilibre --- CHANGES.md | 7 ++++++- docs/upgrade.md | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ad277f32e5..c2c628d1a5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,11 +8,16 @@ notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v167 The minimum version of `poetry` supported for managing source checkouts is now 1.2.0. -Notice: from the next major release (v1.68.0) installing Synapse from a source +**Notice:** from the next major release (v1.68.0) installing Synapse from a source checkout will require a recent Rust compiler. Those using packages or `pip install matrix-synapse` will not be affected. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). +**Notice:** from the next major release (v1.68.0), running Synapse with a SQLite +database will require SQLite version 3.27.0 or higher. 
(The [current minimum + version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).) +See [#12983](https://github.com/matrix-org/synapse/issues/12983) for more details. + Features -------- diff --git a/docs/upgrade.md b/docs/upgrade.md index 023ca0a30b..9f165551fa 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -119,6 +119,21 @@ will require a recent Rust compiler. Those using packages or The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/) +## SQLite version requirement in the next release + +From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or +higher. Synapse v1.67.0 will be the last major release supporting SQLite +versions 3.22 to 3.26. + +Those using docker images or Debian packages from Matrix.org will not be +affected. If you have installed from source, you should check the version of +SQLite used by Python with: + +```shell +python -c "import sqlite3; print(sqlite3.sqlite_version)" +``` + +If this is too old, refer to your distribution for advice on upgrading. # Upgrading to v1.66.0 -- cgit 1.4.1 From 9d118425627c332d30eae8ebe5c3fc519416100f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 8 Sep 2022 12:04:29 +0100 Subject: Also cite upgrade notes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c2c628d1a5..0300fcbd30 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -16,7 +16,7 @@ notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v167 **Notice:** from the next major release (v1.68.0), running Synapse with a SQLite database will require SQLite version 3.27.0 or higher. (The [current minimum version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).) -See [#12983](https://github.com/matrix-org/synapse/issues/12983) for more details. +See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). for more details. Features -- cgit 1.4.1 From 1cc729c177ec734b3e7c56c80bc2b32e100c0fe9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Sep 2022 13:58:31 +0100 Subject: Fix latest deps (#13743) --- .github/workflows/latest_deps.yml | 14 ++++++++++++++ changelog.d/13743.misc | 1 + 2 files changed, 15 insertions(+) create mode 100644 changelog.d/13743.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 07229e56bd..8366ac9393 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -84,6 +84,12 @@ jobs: if: ${{ matrix.postgres-version }} timeout-minutes: 2 run: until pg_isready -h localhost; do sleep 1; done + + # We nuke the local copy, as we've installed synapse into the virtualenv + # (rather than use an editable install, which we no longer support). If we + # don't do this then python can't find the native lib. 
+ - run: rm -rf synapse/ + - run: python -m twisted.trial --jobs=2 tests env: SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }} @@ -128,6 +134,14 @@ jobs: steps: - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: Swatinem/rust-cache@v2 + - name: Ensure sytest runs `pip install` # Delete the lockfile so sytest will `pip install` rather than `poetry install` run: rm /src/poetry.lock diff --git a/changelog.d/13743.misc b/changelog.d/13743.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/13743.misc @@ -0,0 +1 @@ +Add a stub Rust crate. -- cgit 1.4.1 From 526f84bc2ea78a44ef612bf79caf5cfb7966c1ed Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 8 Sep 2022 14:01:42 +0000 Subject: Fix Prometheus recording rules to not use legacy metric names. (#13718) --- changelog.d/13718.misc | 1 + contrib/prometheus/synapse-v1.rules | 21 --------------------- contrib/prometheus/synapse-v2.rules | 22 +++++++++++----------- synapse/app/phone_stats_home.py | 6 +++--- synapse/federation/sender/__init__.py | 4 ++-- synapse/metrics/_legacy_exposition.py | 5 +++++ 6 files changed, 22 insertions(+), 37 deletions(-) create mode 100644 changelog.d/13718.misc delete mode 100644 contrib/prometheus/synapse-v1.rules diff --git a/changelog.d/13718.misc b/changelog.d/13718.misc new file mode 100644 index 0000000000..07ace50b12 --- /dev/null +++ b/changelog.d/13718.misc @@ -0,0 +1 @@ +Add experimental configuration option to allow disabling legacy Prometheus metric names. \ No newline at end of file diff --git a/contrib/prometheus/synapse-v1.rules b/contrib/prometheus/synapse-v1.rules deleted file mode 100644 index 4c900ba537..0000000000 --- a/contrib/prometheus/synapse-v1.rules +++ /dev/null @@ -1,21 +0,0 @@ -synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0) -synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0) - -synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method) -synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet) - -synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet) - -synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m]) -synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s]) - -synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0 -synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0 -synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job) - -synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0 -synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0 -synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job) - -synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0 -synapse_federation_transaction_queue_pending{type="PDU"} = 
synapse_federation_transaction_queue_pending_pdus + 0 diff --git a/contrib/prometheus/synapse-v2.rules b/contrib/prometheus/synapse-v2.rules index 7e405bf7f0..a5e6a735cd 100644 --- a/contrib/prometheus/synapse-v2.rules +++ b/contrib/prometheus/synapse-v2.rules @@ -20,18 +20,18 @@ groups: expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)' - record: 'synapse_cache:hit_ratio_5m' - expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])' + expr: 'rate(synapse_util_caches_cache_hits[5m]) / rate(synapse_util_caches_cache[5m])' - record: 'synapse_cache:hit_ratio_30s' - expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])' + expr: 'rate(synapse_util_caches_cache_hits[30s]) / rate(synapse_util_caches_cache[30s])' - record: 'synapse_federation_client_sent' labels: type: "EDU" - expr: 'synapse_federation_client_sent_edus + 0' + expr: 'synapse_federation_client_sent_edus_total + 0' - record: 'synapse_federation_client_sent' labels: type: "PDU" - expr: 'synapse_federation_client_sent_pdu_destinations:count + 0' + expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0' - record: 'synapse_federation_client_sent' labels: type: "Query" @@ -40,11 +40,11 @@ groups: - record: 'synapse_federation_server_received' labels: type: "EDU" - expr: 'synapse_federation_server_received_edus + 0' + expr: 'synapse_federation_server_received_edus_total + 0' - record: 'synapse_federation_server_received' labels: type: "PDU" - expr: 'synapse_federation_server_received_pdus + 0' + expr: 'synapse_federation_server_received_pdus_total + 0' - record: 'synapse_federation_server_received' labels: type: "Query" @@ -60,19 +60,19 @@ groups: expr: 'synapse_federation_transaction_queue_pending_pdus + 0' - record: synapse_storage_events_persisted_by_source_type - expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"}) + expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"}) labels: type: remote - record: synapse_storage_events_persisted_by_source_type - expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"}) + expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"}) labels: type: local - record: synapse_storage_events_persisted_by_source_type - expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"}) + expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"}) labels: type: bridges - record: synapse_storage_events_persisted_by_event_type - expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep) + expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total) - record: synapse_storage_events_persisted_by_origin - expr: sum without(type) (synapse_storage_events_persisted_events_sep) + expr: sum without(type) (synapse_storage_events_persisted_events_sep_total) diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 51c8d15711..53db1e85b3 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -32,15 +32,15 @@ logger 
= logging.getLogger("synapse.app.homeserver") _stats_process: List[Tuple[int, "resource.struct_rusage"]] = [] # Gauges to expose monthly active user control metrics -current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") +current_mau_gauge = Gauge("synapse_admin_mau_current", "Current MAU") current_mau_by_service_gauge = Gauge( "synapse_admin_mau_current_mau_by_service", "Current MAU by service", ["app_service"], ) -max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit") registered_reserved_users_mau_gauge = Gauge( - "synapse_admin_mau:registered_reserved_users", + "synapse_admin_mau_registered_reserved_users", "Registered users with reserved threepids", ) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 8bc60e3e3e..a6cb3ba58f 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -62,12 +62,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) sent_pdus_destination_dist_count = Counter( - "synapse_federation_client_sent_pdu_destinations:count", + "synapse_federation_client_sent_pdu_destinations_count", "Number of PDUs queued for sending to one or more destinations", ) sent_pdus_destination_dist_total = Counter( - "synapse_federation_client_sent_pdu_destinations:total", + "synapse_federation_client_sent_pdu_destinations", "Total number of PDUs queued for sending across all destinations", ) diff --git a/synapse/metrics/_legacy_exposition.py b/synapse/metrics/_legacy_exposition.py index 6f00ff2a47..133f1603dd 100644 --- a/synapse/metrics/_legacy_exposition.py +++ b/synapse/metrics/_legacy_exposition.py @@ -93,6 +93,11 @@ LEGACY_METRIC_NAMES = { "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits", "synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size", "synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total", + "synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total", + "synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count", + "synapse_admin_mau_current": "synapse_admin_mau:current", + "synapse_admin_mau_max": "synapse_admin_mau:max", + "synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users", } -- cgit 1.4.1 From cf11919ddd4f48b2f59062542ba62969042f80aa Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 8 Sep 2022 14:30:48 +0000 Subject: Fix cache metrics not being updated when not using the legacy exposition module. (#13717) --- changelog.d/13717.misc | 1 + synapse/metrics/_legacy_exposition.py | 7 ---- synapse/util/caches/__init__.py | 60 ++++++++++++++++++++++++++++------- synapse/util/metrics.py | 34 ++++++++++++++++++-- 4 files changed, 81 insertions(+), 21 deletions(-) create mode 100644 changelog.d/13717.misc diff --git a/changelog.d/13717.misc b/changelog.d/13717.misc new file mode 100644 index 0000000000..07ace50b12 --- /dev/null +++ b/changelog.d/13717.misc @@ -0,0 +1 @@ +Add experimental configuration option to allow disabling legacy Prometheus metric names. 
\ No newline at end of file diff --git a/synapse/metrics/_legacy_exposition.py b/synapse/metrics/_legacy_exposition.py index 133f1603dd..563d8cc2c6 100644 --- a/synapse/metrics/_legacy_exposition.py +++ b/synapse/metrics/_legacy_exposition.py @@ -34,8 +34,6 @@ from prometheus_client.core import Sample from twisted.web.resource import Resource from twisted.web.server import Request -from synapse.util import caches - CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8" @@ -107,11 +105,6 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt by prometheus-client. """ - # Trigger the cache metrics to be rescraped, which updates the common - # metrics but do not produce metrics themselves - for collector in caches.collectors_by_name.values(): - collector.collect() - output = [] for metric in registry.collect(): diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index d4a2b77c29..35c0be08b0 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -20,9 +20,11 @@ from sys import intern from typing import Any, Callable, Dict, List, Optional, Sized, TypeVar import attr +from prometheus_client import REGISTRY from prometheus_client.core import Gauge from synapse.config.cache import add_resizable_cache +from synapse.util.metrics import DynamicCollectorRegistry logger = logging.getLogger(__name__) @@ -30,27 +32,62 @@ logger = logging.getLogger(__name__) # Whether to track estimated memory usage of the LruCaches. TRACK_MEMORY_USAGE = False +# We track cache metrics in a special registry that lets us update the metrics +# just before they are returned from the scrape endpoint. +CACHE_METRIC_REGISTRY = DynamicCollectorRegistry() caches_by_name: Dict[str, Sized] = {} -collectors_by_name: Dict[str, "CacheMetric"] = {} -cache_size = Gauge("synapse_util_caches_cache_size", "", ["name"]) -cache_hits = Gauge("synapse_util_caches_cache_hits", "", ["name"]) -cache_evicted = Gauge("synapse_util_caches_cache_evicted_size", "", ["name", "reason"]) -cache_total = Gauge("synapse_util_caches_cache", "", ["name"]) -cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) +cache_size = Gauge( + "synapse_util_caches_cache_size", "", ["name"], registry=CACHE_METRIC_REGISTRY +) +cache_hits = Gauge( + "synapse_util_caches_cache_hits", "", ["name"], registry=CACHE_METRIC_REGISTRY +) +cache_evicted = Gauge( + "synapse_util_caches_cache_evicted_size", + "", + ["name", "reason"], + registry=CACHE_METRIC_REGISTRY, +) +cache_total = Gauge( + "synapse_util_caches_cache", "", ["name"], registry=CACHE_METRIC_REGISTRY +) +cache_max_size = Gauge( + "synapse_util_caches_cache_max_size", "", ["name"], registry=CACHE_METRIC_REGISTRY +) cache_memory_usage = Gauge( "synapse_util_caches_cache_size_bytes", "Estimated memory usage of the caches", ["name"], + registry=CACHE_METRIC_REGISTRY, ) -response_cache_size = Gauge("synapse_util_caches_response_cache_size", "", ["name"]) -response_cache_hits = Gauge("synapse_util_caches_response_cache_hits", "", ["name"]) +response_cache_size = Gauge( + "synapse_util_caches_response_cache_size", + "", + ["name"], + registry=CACHE_METRIC_REGISTRY, +) +response_cache_hits = Gauge( + "synapse_util_caches_response_cache_hits", + "", + ["name"], + registry=CACHE_METRIC_REGISTRY, +) response_cache_evicted = Gauge( - "synapse_util_caches_response_cache_evicted_size", "", ["name", "reason"] + "synapse_util_caches_response_cache_evicted_size", + "", + ["name", "reason"], + 
registry=CACHE_METRIC_REGISTRY, ) -response_cache_total = Gauge("synapse_util_caches_response_cache", "", ["name"]) +response_cache_total = Gauge( + "synapse_util_caches_response_cache", "", ["name"], registry=CACHE_METRIC_REGISTRY +) + + +# Register our custom cache metrics registry with the global registry +REGISTRY.register(CACHE_METRIC_REGISTRY) class EvictionReason(Enum): @@ -168,9 +205,8 @@ def register_cache( add_resizable_cache(cache_name, resize_callback) metric = CacheMetric(cache, cache_type, cache_name, collect_callback) - metric_name = "cache_%s_%s" % (cache_type, cache_name) caches_by_name[cache_name] = cache - collectors_by_name[metric_name] = metric + CACHE_METRIC_REGISTRY.register_hook(metric.collect) return metric diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index bc3b4938ea..9687120ebf 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -15,9 +15,9 @@ import logging from functools import wraps from types import TracebackType -from typing import Awaitable, Callable, Optional, Type, TypeVar +from typing import Awaitable, Callable, Generator, List, Optional, Type, TypeVar -from prometheus_client import Counter +from prometheus_client import CollectorRegistry, Counter, Metric from typing_extensions import Concatenate, ParamSpec, Protocol from synapse.logging.context import ( @@ -208,3 +208,33 @@ class Measure: metrics.real_time_sum += duration # TODO: Add other in flight metrics. + + +class DynamicCollectorRegistry(CollectorRegistry): + """ + Custom Prometheus Collector registry that calls a hook first, allowing you + to update metrics on-demand. + + Don't forget to register this registry with the main registry! + """ + + def __init__(self) -> None: + super().__init__() + self._pre_update_hooks: List[Callable[[], None]] = [] + + def collect(self) -> Generator[Metric, None, None]: + """ + Collects metrics, calling pre-update hooks first. + """ + + for pre_update_hook in self._pre_update_hooks: + pre_update_hook() + + yield from super().collect() + + def register_hook(self, hook: Callable[[], None]) -> None: + """ + Registers a hook that is called before metric collection. + """ + + self._pre_update_hooks.append(hook) -- cgit 1.4.1 From 8ef0c8ff14fcf613c5df1cfc30b38236de1695a7 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 8 Sep 2022 15:54:36 +0100 Subject: Fix error in `is_mine_id` when encountering a malformed ID (#13746) Previously, `is_mine_id` would raise an exception when passed an ID with no colons. Return `False` instead. Fixes #13040. Signed-off-by: Sean Quah --- changelog.d/13746.bugfix | 1 + synapse/server.py | 12 +++++++++++- tests/test_types.py | 26 +++++++++++++++++++++++++- 3 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13746.bugfix diff --git a/changelog.d/13746.bugfix b/changelog.d/13746.bugfix new file mode 100644 index 0000000000..b692af8fd5 --- /dev/null +++ b/changelog.d/13746.bugfix @@ -0,0 +1 @@ +Fix a long standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. diff --git a/synapse/server.py b/synapse/server.py index 5a99c0b344..df3a1cb405 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -341,7 +341,17 @@ class HomeServer(metaclass=abc.ABCMeta): return domain_specific_string.domain == self.hostname def is_mine_id(self, string: str) -> bool: - return string.split(":", 1)[1] == self.hostname + """Determines whether a user ID or room alias originates from this homeserver. 
+ + Returns: + `True` if the hostname part of the user ID or room alias matches this + homeserver. + `False` otherwise, or if the user ID or room alias is malformed. + """ + localpart_hostname = string.split(":", 1) + if len(localpart_hostname) < 2: + return False + return localpart_hostname[1] == self.hostname @cache_in_self def get_clock(self) -> Clock: diff --git a/tests/test_types.py b/tests/test_types.py index d8d82a517e..1111169384 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -13,11 +13,35 @@ # limitations under the License. from synapse.api.errors import SynapseError -from synapse.types import RoomAlias, UserID, map_username_to_mxid_localpart +from synapse.types import ( + RoomAlias, + UserID, + get_domain_from_id, + get_localpart_from_id, + map_username_to_mxid_localpart, +) from tests import unittest +class IsMineIDTests(unittest.HomeserverTestCase): + def test_is_mine_id(self) -> None: + self.assertTrue(self.hs.is_mine_id("@user:test")) + self.assertTrue(self.hs.is_mine_id("#room:test")) + self.assertTrue(self.hs.is_mine_id("invalid:test")) + + self.assertFalse(self.hs.is_mine_id("@user:test\0")) + self.assertFalse(self.hs.is_mine_id("@user")) + + def test_two_colons(self) -> None: + """Test handling of IDs containing more than one colon.""" + # The domain starts after the first colon. + # These functions must interpret things consistently. + self.assertFalse(self.hs.is_mine_id("@user:test:test")) + self.assertEqual("user", get_localpart_from_id("@user:test:test")) + self.assertEqual("test:test", get_domain_from_id("@user:test:test")) + + class UserIDTestCase(unittest.HomeserverTestCase): def test_parse(self): user = UserID.from_string("@1234abcd:test") -- cgit 1.4.1 From 89e8b98b6522dc9fafd5c32efe7a84dc9a31246a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 8 Sep 2022 15:55:03 +0100 Subject: Avoid raising errors due to malformed IDs in `get_current_hosts_in_room` (#13748) Handle malformed user IDs with no colons in `get_current_hosts_in_room`. It's not currently possible for a malformed user ID to join a room, so this error would never be hit. Signed-off-by: Sean Quah --- changelog.d/13748.misc | 1 + synapse/storage/databases/main/roommember.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13748.misc diff --git a/changelog.d/13748.misc b/changelog.d/13748.misc new file mode 100644 index 0000000000..2f419bb659 --- /dev/null +++ b/changelog.d/13748.misc @@ -0,0 +1 @@ +Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index a77e49dc66..e6b87010a9 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1044,6 +1044,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): # We use a `Set` just for fast lookups domain_set: Set[str] = set() for u in users: + if ":" not in u: + continue domain = get_domain_from_id(u) if domain not in domain_set: domain_set.add(domain) @@ -1077,7 +1079,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): ORDER BY min(e.depth) ASC; """ txn.execute(sql, (room_id,)) - return [d for d, in txn] + # `server_domain` will be `NULL` for malformed MXIDs with no colons. 
+ return [d for d, in txn if d is not None] return await self.db_pool.runInteraction( "get_current_hosts_in_room", get_current_hosts_in_room_txn -- cgit 1.4.1 From 906cead9ca5fc95fd64680ec18f775f4de6ea97f Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 8 Sep 2022 15:55:29 +0100 Subject: Update docstrings to explain the impact of partial state (#13750) Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. Signed-off-by: Sean Quah --- changelog.d/13750.misc | 1 + synapse/storage/databases/main/roommember.py | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13750.misc diff --git a/changelog.d/13750.misc b/changelog.d/13750.misc new file mode 100644 index 0000000000..3bccc21fc5 --- /dev/null +++ b/changelog.d/13750.misc @@ -0,0 +1 @@ +Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e6b87010a9..6e1ff5626b 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -192,8 +192,15 @@ class RoomMemberWorkerStore(EventsWorkerStore): (aka. with the lowest depth). This is done to match the sort in `get_current_hosts_in_room()` and so we can re-use the cache but it's not horrible to have here either. - """ + Uses `m.room.member`s in the room state at the current forward extremities to + determine which users are in the room. + + Will return inaccurate results for rooms with partial state, since the state for + the forward extremities of those rooms will exclude most members. We may also + calculate room state incorrectly for such rooms and believe that a member is or + is not in the room when the opposite is true. + """ return await self.db_pool.runInteraction( "get_users_in_room", self.get_users_in_room_txn, room_id ) @@ -1022,6 +1029,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): longest is good because they're most likely to have anything we ask about. + Uses `m.room.member`s in the room state at the current forward extremities to + determine which hosts are in the room. + + Will return inaccurate results for rooms with partial state, since the state for + the forward extremities of those rooms will exclude most members. We may also + calculate room state incorrectly for such rooms and believe that a host is or + is not in the room when the opposite is true. + Returns: Returns a list of servers sorted by longest in the room first. (aka. sorted by join with the lowest depth first). 
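Both malformed-ID fixes above (#13746 and #13748) apply the same defensive pattern: split on the first colon and treat an ID with no domain part as "not ours" rather than letting an exception propagate. A self-contained sketch of that pattern, with an illustrative hostname:

```python
def is_mine_id(string: str, hostname: str = "example.com") -> bool:
    """Return True only when the ID has a domain part matching this server."""
    localpart_hostname = string.split(":", 1)
    if len(localpart_hostname) < 2:
        # Malformed ID with no colon: report "not ours" rather than crash.
        return False
    return localpart_hostname[1] == hostname


assert is_mine_id("@user:example.com")
assert not is_mine_id("user-with-no-colon")
assert not is_mine_id("@user:elsewhere.org")
```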
-- cgit 1.4.1 From f799eac7ea96f943ad1272a5a81f845dfa08a254 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 8 Sep 2022 17:41:48 +0200 Subject: Add timestamp to user's consent (#13741) Co-authored-by: reivilibre --- changelog.d/13741.feature | 1 + docs/admin_api/user_admin_api.md | 2 ++ synapse/handlers/admin.py | 1 + synapse/storage/databases/main/registration.py | 6 +++- .../main/delta/72/06add_consent_ts_to_users.sql | 16 +++++++++++ tests/rest/admin/test_user.py | 1 + tests/storage/test_registration.py | 33 +++++++++++++++++----- 7 files changed, 52 insertions(+), 8 deletions(-) create mode 100644 changelog.d/13741.feature create mode 100644 synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql diff --git a/changelog.d/13741.feature b/changelog.d/13741.feature new file mode 100644 index 0000000000..dff46f373f --- /dev/null +++ b/changelog.d/13741.feature @@ -0,0 +1 @@ +Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index c1ca0c8a64..975f05c929 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -42,6 +42,7 @@ It returns a JSON body like the following: "appservice_id": null, "consent_server_notice_sent": null, "consent_version": null, + "consent_ts": null, "external_ids": [ { "auth_provider": "", @@ -364,6 +365,7 @@ The following actions are **NOT** performed. The list may be incomplete. - Remove the user's creation (registration) timestamp - [Remove rate limit overrides](#override-ratelimiting-for-users) - Remove from monthly active users +- Remove user's consent information (consent version and timestamp) ## Reset password diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index d4fe7df533..cf9f19608a 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -70,6 +70,7 @@ class AdminHandler: "appservice_id", "consent_server_notice_sent", "consent_version", + "consent_ts", "user_type", "is_guest", } diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7fb9c801da..ac821878b0 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -175,6 +175,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): "is_guest", "admin", "consent_version", + "consent_ts", "consent_server_notice_sent", "appservice_id", "creation_ts", @@ -2227,7 +2228,10 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): txn, table="users", keyvalues={"name": user_id}, - updatevalues={"consent_version": consent_version}, + updatevalues={ + "consent_version": consent_version, + "consent_ts": self._clock.time_msec(), + }, ) self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) diff --git a/synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql b/synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql new file mode 100644 index 0000000000..609eb1750f --- /dev/null +++ b/synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql @@ -0,0 +1,16 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE users ADD consent_ts bigint; diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 1afd082707..ec5ccf6fca 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -2580,6 +2580,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertIn("appservice_id", content) self.assertIn("consent_server_notice_sent", content) self.assertIn("consent_version", content) + self.assertIn("consent_ts", content) self.assertIn("external_ids", content) # This key was removed intentionally. Ensure it is not accidentally re-included. diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index a49ac1525e..853a93afab 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -11,15 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import UserTypes from synapse.api.errors import ThreepidValidationError +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase class RegistrationStoreTestCase(HomeserverTestCase): - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.user_id = "@my-user:test" @@ -27,7 +30,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): self.pwhash = "{xx1}123456789" self.device_id = "akgjhdjklgshg" - def test_register(self): + def test_register(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) self.assertEqual( @@ -38,6 +41,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): "admin": 0, "is_guest": 0, "consent_version": None, + "consent_ts": None, "consent_server_notice_sent": None, "appservice_id": None, "creation_ts": 0, @@ -48,7 +52,20 @@ class RegistrationStoreTestCase(HomeserverTestCase): (self.get_success(self.store.get_user_by_id(self.user_id))), ) - def test_add_tokens(self): + def test_consent(self) -> None: + self.get_success(self.store.register_user(self.user_id, self.pwhash)) + before_consent = self.clock.time_msec() + self.reactor.advance(5) + self.get_success(self.store.user_set_consent_version(self.user_id, "1")) + self.reactor.advance(5) + + user = self.get_success(self.store.get_user_by_id(self.user_id)) + assert user + self.assertEqual(user["consent_version"], "1") + self.assertGreater(user["consent_ts"], before_consent) + self.assertLess(user["consent_ts"], self.clock.time_msec()) + + def test_add_tokens(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) self.get_success( self.store.add_access_token_to_user( @@ -58,11 +75,12 @@ class RegistrationStoreTestCase(HomeserverTestCase): result = self.get_success(self.store.get_user_by_access_token(self.tokens[1])) + assert result self.assertEqual(result.user_id, self.user_id) self.assertEqual(result.device_id, 
self.device_id) self.assertIsNotNone(result.token_id) - def test_user_delete_access_tokens(self): + def test_user_delete_access_tokens(self) -> None: # add some tokens self.get_success(self.store.register_user(self.user_id, self.pwhash)) self.get_success( @@ -87,6 +105,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): # check the one not associated with the device was not deleted user = self.get_success(self.store.get_user_by_access_token(self.tokens[0])) + assert user self.assertEqual(self.user_id, user.user_id) # now delete the rest @@ -95,11 +114,11 @@ class RegistrationStoreTestCase(HomeserverTestCase): user = self.get_success(self.store.get_user_by_access_token(self.tokens[0])) self.assertIsNone(user, "access token was not deleted without device_id") - def test_is_support_user(self): + def test_is_support_user(self) -> None: TEST_USER = "@test:test" SUPPORT_USER = "@support:test" - res = self.get_success(self.store.is_support_user(None)) + res = self.get_success(self.store.is_support_user(None)) # type: ignore[arg-type] self.assertFalse(res) self.get_success( self.store.register_user(user_id=TEST_USER, password_hash=None) @@ -115,7 +134,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): res = self.get_success(self.store.is_support_user(SUPPORT_USER)) self.assertTrue(res) - def test_3pid_inhibit_invalid_validation_session_error(self): + def test_3pid_inhibit_invalid_validation_session_error(self) -> None: """Tests that enabling the configuration option to inhibit 3PID errors on /requestToken also inhibits validation errors caused by an unknown session ID. """ -- cgit 1.4.1 From 5261d2e2e869d10863bcd13e45f16e8cac99e3d0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 8 Sep 2022 17:50:15 +0000 Subject: Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. (#13756) --- changelog.d/13756.misc | 1 + contrib/prometheus/synapse-v2.rules | 31 ++++++++----------------------- 2 files changed, 9 insertions(+), 23 deletions(-) create mode 100644 changelog.d/13756.misc diff --git a/changelog.d/13756.misc b/changelog.d/13756.misc new file mode 100644 index 0000000000..06e9cd09bf --- /dev/null +++ b/changelog.d/13756.misc @@ -0,0 +1 @@ +Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. 
\ No newline at end of file diff --git a/contrib/prometheus/synapse-v2.rules b/contrib/prometheus/synapse-v2.rules index a5e6a735cd..cbe6f7beba 100644 --- a/contrib/prometheus/synapse-v2.rules +++ b/contrib/prometheus/synapse-v2.rules @@ -1,29 +1,7 @@ groups: - name: synapse rules: - - record: "synapse_federation_transaction_queue_pendingEdus:total" - expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)" - - record: "synapse_federation_transaction_queue_pendingPdus:total" - expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)" - - record: 'synapse_http_server_request_count:method' - labels: - servlet: "" - expr: "sum(synapse_http_server_request_count) by (method)" - - record: 'synapse_http_server_request_count:servlet' - labels: - method: "" - expr: 'sum(synapse_http_server_request_count) by (servlet)' - - - record: 'synapse_http_server_request_count:total' - labels: - servlet: "" - expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)' - - - record: 'synapse_cache:hit_ratio_5m' - expr: 'rate(synapse_util_caches_cache_hits[5m]) / rate(synapse_util_caches_cache[5m])' - - record: 'synapse_cache:hit_ratio_30s' - expr: 'rate(synapse_util_caches_cache_hits[30s]) / rate(synapse_util_caches_cache[30s])' - + # These 3 rules are used in the included Prometheus console - record: 'synapse_federation_client_sent' labels: type: "EDU" @@ -37,6 +15,7 @@ groups: type: "Query" expr: 'sum(synapse_federation_client_sent_queries) by (job)' + # These 3 rules are used in the included Prometheus console - record: 'synapse_federation_server_received' labels: type: "EDU" @@ -50,6 +29,7 @@ groups: type: "Query" expr: 'sum(synapse_federation_server_received_queries) by (job)' + # These 2 rules are used in the included Prometheus console - record: 'synapse_federation_transaction_queue_pending' labels: type: "EDU" @@ -59,6 +39,7 @@ groups: type: "PDU" expr: 'synapse_federation_transaction_queue_pending_pdus + 0' + # These 3 rules are used in the included Grafana dashboard - record: synapse_storage_events_persisted_by_source_type expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"}) labels: @@ -71,8 +52,12 @@ groups: expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"}) labels: type: bridges + + # This rule is used in the included Grafana dashboard - record: synapse_storage_events_persisted_by_event_type expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total) + + # This rule is used in the included Grafana dashboard - record: synapse_storage_events_persisted_by_origin expr: sum without(type) (synapse_storage_events_persisted_events_sep_total) -- cgit 1.4.1 From 69fa29700e260f55238bfa480d3dd2b91a4353c0 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 8 Sep 2022 20:13:39 +0100 Subject: Re-type hint some collections in `/sync` code as read-only (#13754) Signed-off-by: Sean Quah --- changelog.d/13754.misc | 1 + synapse/handlers/sync.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) create mode 100644 changelog.d/13754.misc diff --git a/changelog.d/13754.misc b/changelog.d/13754.misc new file mode 100644 index 0000000000..662ee00e99 --- /dev/null +++ b/changelog.d/13754.misc @@ -0,0 +1 @@ 
+Re-type hint some collections as read-only. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 2d95b1fa24..5293fa4d0e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -15,6 +15,7 @@ import itertools import logging from typing import ( TYPE_CHECKING, + AbstractSet, Any, Collection, Dict, @@ -1413,10 +1414,10 @@ class SyncHandler: async def _generate_sync_entry_for_device_list( self, sync_result_builder: "SyncResultBuilder", - newly_joined_rooms: Set[str], - newly_joined_or_invited_or_knocked_users: Set[str], - newly_left_rooms: Set[str], - newly_left_users: Set[str], + newly_joined_rooms: AbstractSet[str], + newly_joined_or_invited_or_knocked_users: AbstractSet[str], + newly_left_rooms: AbstractSet[str], + newly_left_users: AbstractSet[str], ) -> DeviceListUpdates: """Generate the DeviceListUpdates section of sync @@ -1434,8 +1435,7 @@ class SyncHandler: user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token - # We're going to mutate these fields, so lets copy them rather than - # assume they won't get used later. + # Take a copy since these fields will be mutated later. newly_joined_or_invited_or_knocked_users = set( newly_joined_or_invited_or_knocked_users ) @@ -1635,8 +1635,8 @@ class SyncHandler: async def _generate_sync_entry_for_presence( self, sync_result_builder: "SyncResultBuilder", - newly_joined_rooms: Set[str], - newly_joined_or_invited_users: Set[str], + newly_joined_rooms: AbstractSet[str], + newly_joined_or_invited_users: AbstractSet[str], ) -> None: """Generates the presence portion of the sync response. Populates the `sync_result_builder` with the result. @@ -1694,7 +1694,7 @@ class SyncHandler: self, sync_result_builder: "SyncResultBuilder", account_data_by_room: Dict[str, Dict[str, JsonDict]], - ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]: + ) -> Tuple[AbstractSet[str], AbstractSet[str], AbstractSet[str], AbstractSet[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. @@ -2534,7 +2534,7 @@ class SyncResultBuilder: archived: List[ArchivedSyncResult] = attr.Factory(list) to_device: List[JsonDict] = attr.Factory(list) - def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]: + def calculate_user_changes(self) -> Tuple[AbstractSet[str], AbstractSet[str]]: """Work out which other users have joined or left rooms we are joined to. This data only is only useful for an incremental sync. -- cgit 1.4.1 From f2d2481e56f06005de5ae8429eca3bb31834079e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 9 Sep 2022 11:14:10 +0100 Subject: Require SQLite >= 3.27.0 (#13760) --- changelog.d/13760.removal | 1 + synapse/storage/database.py | 47 +++++----- synapse/storage/databases/main/lock.py | 121 ++++++++----------------- synapse/storage/databases/main/stats.py | 86 +++++++----------- synapse/storage/databases/main/transactions.py | 30 ++---- synapse/storage/engines/_base.py | 8 -- synapse/storage/engines/postgres.py | 7 -- synapse/storage/engines/sqlite.py | 13 +-- tests/storage/test_base.py | 1 - 9 files changed, 106 insertions(+), 208 deletions(-) create mode 100644 changelog.d/13760.removal diff --git a/changelog.d/13760.removal b/changelog.d/13760.removal new file mode 100644 index 0000000000..624e7c3678 --- /dev/null +++ b/changelog.d/13760.removal @@ -0,0 +1 @@ +Synapse will now refuse to start if configured to use SQLite < 3.27. 
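The new SQLite floor is what makes the cleanup below possible: native `INSERT ... ON CONFLICT DO UPDATE` upserts have been available since SQLite 3.24, so with 3.27 as the minimum every supported database can upsert natively and the emulated fallback paths (and the `can_native_upsert` flag guarding them) can be deleted. A runnable sketch of the native form, using an illustrative table — it needs a `sqlite3` module linked against SQLite 3.24 or newer:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE destinations (destination TEXT PRIMARY KEY, retry_interval INTEGER)"
)


def set_retry_interval(destination: str, retry_interval: int) -> None:
    # One atomic statement instead of a SELECT-then-INSERT/UPDATE dance
    # under a table lock, which is what the emulated path had to do.
    conn.execute(
        """
        INSERT INTO destinations (destination, retry_interval)
        VALUES (?, ?)
        ON CONFLICT (destination) DO UPDATE SET retry_interval = excluded.retry_interval
        """,
        (destination, retry_interval),
    )


set_retry_interval("example.com", 5000)
set_retry_interval("example.com", 10000)  # updates the existing row in place
```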
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index b394a6658b..e881bff7fb 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -533,15 +533,14 @@ class DatabasePool: if isinstance(self.engine, Sqlite3Engine): self._unsafe_to_upsert_tables.add("user_directory_search") - if self.engine.can_native_upsert: - # Check ASAP (and then later, every 1s) to see if we have finished - # background updates of tables that aren't safe to update. - self._clock.call_later( - 0.0, - run_as_background_process, - "upsert_safety_check", - self._check_safe_to_upsert, - ) + # Check ASAP (and then later, every 1s) to see if we have finished + # background updates of tables that aren't safe to update. + self._clock.call_later( + 0.0, + run_as_background_process, + "upsert_safety_check", + self._check_safe_to_upsert, + ) def name(self) -> str: "Return the name of this database" @@ -1160,11 +1159,8 @@ class DatabasePool: attempts = 0 while True: try: - # We can autocommit if we are going to use native upserts - autocommit = ( - self.engine.can_native_upsert - and table not in self._unsafe_to_upsert_tables - ) + # We can autocommit if it is safe to upsert + autocommit = table not in self._unsafe_to_upsert_tables return await self.runInteraction( desc, @@ -1199,7 +1195,7 @@ class DatabasePool: ) -> bool: """ Pick the UPSERT method which works best on the platform. Either the - native one (Pg9.5+, recent SQLites), or fall back to an emulated method. + native one (Pg9.5+, SQLite >= 3.24), or fall back to an emulated method. Args: txn: The transaction to use. @@ -1207,14 +1203,15 @@ class DatabasePool: keyvalues: The unique key tables and their new values values: The nonunique columns and their new values insertion_values: additional key/values to use only when inserting - lock: True to lock the table when doing the upsert. + lock: True to lock the table when doing the upsert. Unused when performing + a native upsert. Returns: Returns True if a row was inserted or updated (i.e. if `values` is not empty then this always returns True) """ insertion_values = insertion_values or {} - if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: + if table not in self._unsafe_to_upsert_tables: return self.simple_upsert_txn_native_upsert( txn, table, keyvalues, values, insertion_values=insertion_values ) @@ -1365,14 +1362,12 @@ class DatabasePool: value_names: The value column names value_values: A list of each row's value column values. Ignored if value_names is empty. - lock: True to lock the table when doing the upsert. Unused if the database engine - supports native upserts. + lock: True to lock the table when doing the upsert. Unused when performing + a native upsert. """ - # We can autocommit if we are going to use native upserts - autocommit = ( - self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables - ) + # We can autocommit if it safe to upsert + autocommit = table not in self._unsafe_to_upsert_tables await self.runInteraction( desc, @@ -1406,10 +1401,10 @@ class DatabasePool: value_names: The value column names value_values: A list of each row's value column values. Ignored if value_names is empty. - lock: True to lock the table when doing the upsert. Unused if the database engine - supports native upserts. + lock: True to lock the table when doing the upsert. Unused when performing + a native upsert. 
""" - if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: + if table not in self._unsafe_to_upsert_tables: return self.simple_upsert_many_txn_native_upsert( txn, table, key_names, key_values, value_names, value_values ) diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 2d7633fbd5..7270ef09da 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -129,91 +129,48 @@ class LockStore(SQLBaseStore): now = self._clock.time_msec() token = random_string(6) - if self.db_pool.engine.can_native_upsert: - - def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool: - # We take out the lock if either a) there is no row for the lock - # already, b) the existing row has timed out, or c) the row is - # for this instance (which means the process got killed and - # restarted) - sql = """ - INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts) - VALUES (?, ?, ?, ?, ?) - ON CONFLICT (lock_name, lock_key) - DO UPDATE - SET - token = EXCLUDED.token, - instance_name = EXCLUDED.instance_name, - last_renewed_ts = EXCLUDED.last_renewed_ts - WHERE - worker_locks.last_renewed_ts < ? - OR worker_locks.instance_name = EXCLUDED.instance_name - """ - txn.execute( - sql, - ( - lock_name, - lock_key, - self._instance_name, - token, - now, - now - _LOCK_TIMEOUT_MS, - ), - ) - - # We only acquired the lock if we inserted or updated the table. - return bool(txn.rowcount) - - did_lock = await self.db_pool.runInteraction( - "try_acquire_lock", - _try_acquire_lock_txn, - # We can autocommit here as we're executing a single query, this - # will avoid serialization errors. - db_autocommit=True, + def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool: + # We take out the lock if either a) there is no row for the lock + # already, b) the existing row has timed out, or c) the row is + # for this instance (which means the process got killed and + # restarted) + sql = """ + INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts) + VALUES (?, ?, ?, ?, ?) + ON CONFLICT (lock_name, lock_key) + DO UPDATE + SET + token = EXCLUDED.token, + instance_name = EXCLUDED.instance_name, + last_renewed_ts = EXCLUDED.last_renewed_ts + WHERE + worker_locks.last_renewed_ts < ? + OR worker_locks.instance_name = EXCLUDED.instance_name + """ + txn.execute( + sql, + ( + lock_name, + lock_key, + self._instance_name, + token, + now, + now - _LOCK_TIMEOUT_MS, + ), ) - if not did_lock: - return None - - else: - # If we're on an old SQLite we emulate the above logic by first - # clearing out any existing stale locks and then upserting. - - def _try_acquire_lock_emulated_txn(txn: LoggingTransaction) -> bool: - sql = """ - DELETE FROM worker_locks - WHERE - lock_name = ? - AND lock_key = ? - AND (last_renewed_ts < ? OR instance_name = ?) - """ - txn.execute( - sql, - (lock_name, lock_key, now - _LOCK_TIMEOUT_MS, self._instance_name), - ) - - inserted = self.db_pool.simple_upsert_txn_emulated( - txn, - table="worker_locks", - keyvalues={ - "lock_name": lock_name, - "lock_key": lock_key, - }, - values={}, - insertion_values={ - "token": token, - "last_renewed_ts": self._clock.time_msec(), - "instance_name": self._instance_name, - }, - ) - - return inserted - did_lock = await self.db_pool.runInteraction( - "try_acquire_lock_emulated", _try_acquire_lock_emulated_txn - ) + # We only acquired the lock if we inserted or updated the table. 
+ return bool(txn.rowcount) - if not did_lock: - return None + did_lock = await self.db_pool.runInteraction( + "try_acquire_lock", + _try_acquire_lock_txn, + # We can autocommit here as we're executing a single query, this + # will avoid serialization errors. + db_autocommit=True, + ) + if not did_lock: + return None lock = Lock( self._reactor, diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index b4c652acf3..356d4ca788 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -446,59 +446,41 @@ class StatsStore(StateDeltasStore): absolutes: Absolute (set) fields additive_relatives: Fields that will be added onto if existing row present. """ - if self.database_engine.can_native_upsert: - absolute_updates = [ - "%(field)s = EXCLUDED.%(field)s" % {"field": field} - for field in absolutes.keys() - ] - - relative_updates = [ - "%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)" - % {"table": table, "field": field} - for field in additive_relatives.keys() - ] - - insert_cols = [] - qargs = [] - - for (key, val) in chain( - keyvalues.items(), absolutes.items(), additive_relatives.items() - ): - insert_cols.append(key) - qargs.append(val) + absolute_updates = [ + "%(field)s = EXCLUDED.%(field)s" % {"field": field} + for field in absolutes.keys() + ] + + relative_updates = [ + "%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)" + % {"table": table, "field": field} + for field in additive_relatives.keys() + ] + + insert_cols = [] + qargs = [] + + for (key, val) in chain( + keyvalues.items(), absolutes.items(), additive_relatives.items() + ): + insert_cols.append(key) + qargs.append(val) + + sql = """ + INSERT INTO %(table)s (%(insert_cols_cs)s) + VALUES (%(insert_vals_qs)s) + ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s + """ % { + "table": table, + "insert_cols_cs": ", ".join(insert_cols), + "insert_vals_qs": ", ".join( + ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives)) + ), + "key_columns": ", ".join(keyvalues), + "updates": ", ".join(chain(absolute_updates, relative_updates)), + } - sql = """ - INSERT INTO %(table)s (%(insert_cols_cs)s) - VALUES (%(insert_vals_qs)s) - ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s - """ % { - "table": table, - "insert_cols_cs": ", ".join(insert_cols), - "insert_vals_qs": ", ".join( - ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives)) - ), - "key_columns": ", ".join(keyvalues), - "updates": ", ".join(chain(absolute_updates, relative_updates)), - } - - txn.execute(sql, qargs) - else: - self.database_engine.lock_table(txn, table) - retcols = list(chain(absolutes.keys(), additive_relatives.keys())) - current_row = self.db_pool.simple_select_one_txn( - txn, table, keyvalues, retcols, allow_none=True - ) - if current_row is None: - merged_dict = {**keyvalues, **absolutes, **additive_relatives} - self.db_pool.simple_insert_txn(txn, table, merged_dict) - else: - for (key, val) in additive_relatives.items(): - if current_row[key] is None: - current_row[key] = val - else: - current_row[key] += val - current_row.update(absolutes) - self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row) + txn.execute(sql, qargs) async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None: """Calculate and insert an entry into room_stats_current. 
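The rewritten stats upsert above mixes two column behaviours in a single statement: absolute fields are overwritten with the incoming value, while additive fields are summed onto whatever is already stored, with `COALESCE(..., 0)` covering a NULL starting point. A small runnable illustration of the generated shape — the table and column names here are examples, not Synapse's exact schema:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE room_stats_current (room_id TEXT PRIMARY KEY,"
    " current_state_events INTEGER, joined_members INTEGER)"
)

# "current_state_events" is absolute (overwritten); "joined_members" is
# additive (the new delta is added onto the stored value).
upsert_sql = """
    INSERT INTO room_stats_current (room_id, current_state_events, joined_members)
    VALUES (?, ?, ?)
    ON CONFLICT (room_id) DO UPDATE SET
        current_state_events = EXCLUDED.current_state_events,
        joined_members = EXCLUDED.joined_members
            + COALESCE(room_stats_current.joined_members, 0)
"""

conn.execute(upsert_sql, ("!room:example.com", 10, 5))
conn.execute(upsert_sql, ("!room:example.com", 12, 3))
print(conn.execute("SELECT joined_members FROM room_stats_current").fetchone())  # (8,)
```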
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index ba79e19f7f..f8c6877ee8 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -221,25 +221,15 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): retry_interval: how long until next retry in ms """ - if self.database_engine.can_native_upsert: - await self.db_pool.runInteraction( - "set_destination_retry_timings", - self._set_destination_retry_timings_native, - destination, - failure_ts, - retry_last_ts, - retry_interval, - db_autocommit=True, # Safe as its a single upsert - ) - else: - await self.db_pool.runInteraction( - "set_destination_retry_timings", - self._set_destination_retry_timings_emulated, - destination, - failure_ts, - retry_last_ts, - retry_interval, - ) + await self.db_pool.runInteraction( + "set_destination_retry_timings", + self._set_destination_retry_timings_native, + destination, + failure_ts, + retry_last_ts, + retry_interval, + db_autocommit=True, # Safe as it's a single upsert + ) def _set_destination_retry_timings_native( self, @@ -249,8 +239,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): retry_last_ts: int, retry_interval: int, ) -> None: - assert self.database_engine.can_native_upsert - # Upsert retry time interval if retry_interval is zero (i.e. we're # resetting it) or greater than the existing retry interval. # diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 971ff82693..0d16a419a4 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -43,14 +43,6 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta): def single_threaded(self) -> bool: ... - @property - @abc.abstractmethod - def can_native_upsert(self) -> bool: - """ - Do we support native UPSERTs? - """ - ... - @property @abc.abstractmethod def supports_using_any_list(self) -> bool: diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 517f9d5f98..7f7d006ac2 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -158,13 +158,6 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]): cursor.close() db_conn.commit() - @property - def can_native_upsert(self) -> bool: - """ - Can we use native UPSERTs? - """ - return True - @property def supports_using_any_list(self) -> bool: """Do we support using `a = ANY(?)` and passing a list""" diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 621f2c5efe..095ae0a096 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -48,14 +48,6 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]): def single_threaded(self) -> bool: return True - @property - def can_native_upsert(self) -> bool: - """ - Do we support native UPSERTs? This requires SQLite3 3.24+, plus some - more work we haven't done yet to tell what was inserted vs updated. 
- """ - return sqlite3.sqlite_version_info >= (3, 24, 0) - @property def supports_using_any_list(self) -> bool: """Do we support using `a = ANY(?)` and passing a list""" @@ -70,12 +62,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]): self, db_conn: sqlite3.Connection, allow_outdated_version: bool = False ) -> None: if not allow_outdated_version: - version = sqlite3.sqlite_version_info # Synapse is untested against older SQLite versions, and we don't want # to let users upgrade to a version of Synapse with broken support for their # sqlite version, because it risks leaving them with a half-upgraded db. - if version < (3, 22, 0): - raise RuntimeError("Synapse requires sqlite 3.22 or above.") + if sqlite3.sqlite_version_info < (3, 27, 0): + raise RuntimeError("Synapse requires sqlite 3.27 or above.") def check_new_database(self, txn: Cursor) -> None: """Gets called when setting up a brand new database. This allows us to diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index cce8e75c74..40e58f8199 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -54,7 +54,6 @@ class SQLBaseStoreTestCase(unittest.TestCase): sqlite_config = {"name": "sqlite3"} engine = create_engine(sqlite_config) fake_engine = Mock(wraps=engine) - fake_engine.can_native_upsert = False fake_engine.in_transaction.return_value = False db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) -- cgit 1.4.1 From c85c5ace525c3cadac8501c2eba4abbc91d9f09d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 9 Sep 2022 11:29:04 +0100 Subject: Add rust to CI (#13763) --- .github/workflows/tests.yml | 83 ++++++++++++++++++++++++++++++++++++++++++++- changelog.d/13763.misc | 1 + rust/src/lib.rs | 1 + 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13763.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 50dc8e30d4..7c4ae3d7ff 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -10,6 +10,23 @@ concurrency: cancel-in-progress: true jobs: + # Job to detect what has changed so we don't run e.g. Rust checks on PRs that + # don't modify Rust code. 
+ changes: + runs-on: ubuntu-latest + outputs: + rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }} + steps: + - uses: dorny/paths-filter@v2 + id: filter + # We only check on PRs + if: startsWith(github.ref, 'refs/pull/') + with: + filters: | + rust: + - 'rust/**' + - 'Cargo.toml' + check-sampleconfig: runs-on: ubuntu-latest steps: @@ -65,10 +82,54 @@ jobs: extras: "all" - run: poetry run scripts-dev/check_pydantic_models.py + lint-clippy: + runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.rust == 'true' }} + + steps: + - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.61.0 + override: true + components: clippy + - uses: Swatinem/rust-cache@v2 + + - run: cargo clippy + + lint-rustfmt: + runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.rust == 'true' }} + + steps: + - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.61.0 + override: true + components: rustfmt + - uses: Swatinem/rust-cache@v2 + + - run: cargo fmt --check + # Dummy step to gate other tests on without repeating the whole list linting-done: if: ${{ !cancelled() }} # Run this even if prior jobs were skipped - needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta] + needs: + - lint + - lint-crlf + - lint-newsfile + - lint-pydantic + - check-sampleconfig + - check-schema-delta + - lint-clippy + - lint-rustfmt runs-on: ubuntu-latest steps: - run: "true" @@ -384,6 +445,25 @@ jobs: shell: bash name: Run Complement Tests + cargo-test: + if: ${{ needs.changes.outputs.rust == 'true' }} + runs-on: ubuntu-latest + needs: + - linting-done + - changes + + steps: + - uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.61.0 + override: true + - uses: Swatinem/rust-cache@v2 + + - run: cargo test + # a job which marks all the other jobs as complete, thus allowing PRs to be merged. tests-done: if: ${{ always() }} @@ -398,6 +478,7 @@ jobs: - export-data - portdb - complement + - cargo-test runs-on: ubuntu-latest steps: - uses: matrix-org/done-action@v2 diff --git a/changelog.d/13763.misc b/changelog.d/13763.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/13763.misc @@ -0,0 +1 @@ +Add a stub Rust crate. diff --git a/rust/src/lib.rs b/rust/src/lib.rs index fc4eb39154..142fc2ed93 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -11,5 +11,6 @@ fn sum_as_string(a: usize, b: usize) -> PyResult { #[pymodule] fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(sum_as_string, m)?)?; + Ok(()) } -- cgit 1.4.1 From 3d9f82efcb9c337197c9f50a88ec3fb541ee08ff Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Sep 2022 07:08:41 -0400 Subject: Use an upsert for `receipts_graph`. (#13752) Instead of a delete, then insert. This was previously done for `receipts_linearized` in 2dc430d36ef793b38d6d79ec8db4ea60588df2ee (#7607). --- changelog.d/13752.misc | 1 + synapse/storage/databases/main/receipts.py | 12 ++++-------- 2 files changed, 5 insertions(+), 8 deletions(-) create mode 100644 changelog.d/13752.misc diff --git a/changelog.d/13752.misc b/changelog.d/13752.misc new file mode 100644 index 0000000000..7624861b9f --- /dev/null +++ b/changelog.d/13752.misc @@ -0,0 +1 @@ +User an additional database query when persisting receipts. 
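For reference: with the `can_native_upsert` flag gone (see the earlier patch in this series, which also raises the minimum SQLite version to 3.27), `simple_upsert_txn` should always take the native path, so the call added below amounts to a single statement. A rough sketch of that native form, not the actual helper implementation; `txn` is a database cursor, and `encoded_event_ids`/`encoded_data` are hypothetical stand-ins for the JSON-encoded values:

    # Hypothetical illustration of the native upsert that replaces the old
    # delete-then-insert pair; the conflict target is the table's unique
    # constraint on (user_id, room_id, receipt_type).
    txn.execute(
        """
        INSERT INTO receipts_graph (room_id, receipt_type, user_id, event_ids, data)
        VALUES (?, ?, ?, ?, ?)
        ON CONFLICT (user_id, room_id, receipt_type)
        DO UPDATE SET event_ids = EXCLUDED.event_ids, data = EXCLUDED.data
        """,
        (room_id, receipt_type, user_id, encoded_event_ids, encoded_data),
    )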
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 124c70ad37..3838409519 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -812,7 +812,7 @@ class ReceiptsWorkerStore(SQLBaseStore): # FIXME: This shouldn't invalidate the whole cache txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,)) - self.db_pool.simple_delete_txn( + self.db_pool.simple_upsert_txn( txn, table="receipts_graph", keyvalues={ @@ -820,17 +820,13 @@ class ReceiptsWorkerStore(SQLBaseStore): "receipt_type": receipt_type, "user_id": user_id, }, - ) - self.db_pool.simple_insert_txn( - txn, - table="receipts_graph", values={ - "room_id": room_id, - "receipt_type": receipt_type, - "user_id": user_id, "event_ids": json_encoder.encode(event_ids), "data": json_encoder.encode(data), }, + # receipts_graph has a unique constraint on + # (user_id, room_id, receipt_type), so no need to lock + lock=False, ) -- cgit 1.4.1 From f694bb71b7ea7841a5b5db3d884dfda5a3f78023 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 9 Sep 2022 11:30:06 -0500 Subject: Strip number suffix from instance name to consolidate services that traces are spread over (#13729) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The problem with many services is that it makes it hard to find which service has the trace you want, see https://github.com/jaegertracing/jaeger-ui/issues/985 Previously, we split traces out into services based on their instance name like `matrix.org client_reader-1`, etc but there are many worker instances of the same `client_reader` so there is a lot to click through. With this PR, all of the traces are just collected under the worker type like `client_reader`, `event_persister` 😇 Note: A Synapse worker instance name is an opaque string with the number convention only being our own thing for the `matrix.org` deployment. But seems pretty sensible to group things this way. --- changelog.d/13729.misc | 1 + synapse/logging/opentracing.py | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13729.misc diff --git a/changelog.d/13729.misc b/changelog.d/13729.misc new file mode 100644 index 0000000000..c6a6f617e3 --- /dev/null +++ b/changelog.d/13729.misc @@ -0,0 +1 @@ +Strip number suffix from instance name to consolidate services that traces are spread over. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 482316a1ff..adf3f54770 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -203,6 +203,9 @@ if TYPE_CHECKING: # Helper class +# Matches the number suffix in an instance name like "matrix.org client_reader-8" +STRIP_INSTANCE_NUMBER_SUFFIX_REGEX = re.compile(r"[_-]?\d+$") + class _DummyTagNames: """wrapper of opentracings tags. We need to have them if we @@ -441,9 +444,17 @@ def init_tracer(hs: "HomeServer") -> None: from jaeger_client.metrics.prometheus import PrometheusMetricsFactory + # Instance names are opaque strings but by stripping off the number suffix, + # we can get something that looks like a "worker type", e.g. + # "client_reader-1" -> "client_reader" so we don't spread the traces across + # so many services. 
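+    # (An instance name with no trailing number, e.g. "master", is left
+    # unchanged by the regex.)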
+ instance_name_by_type = re.sub( + STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name() + ) + config = JaegerConfig( config=hs.config.tracing.jaeger_config, - service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}", + service_name=f"{hs.config.server.server_name} {instance_name_by_type}", scope_manager=LogContextScopeManager(), metrics_factory=PrometheusMetricsFactory(), ) -- cgit 1.4.1 From a911ffb42cc88adc8084a04acf6fd651efba278f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 9 Sep 2022 11:31:37 -0500 Subject: Tag trace with instance name (#13761) We tag the Synapse instance name so that it's an easy jumping off point into the logs. Can also be used to filter for an instance that is under load. As suggested by @clokep and @reivilibre in, - https://github.com/matrix-org/synapse/pull/13729#discussion_r964719258 - https://github.com/matrix-org/synapse/pull/13729#discussion_r964733578 --- changelog.d/13761.misc | 1 + synapse/api/auth.py | 7 +++++++ synapse/logging/opentracing.py | 6 ++++-- 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13761.misc diff --git a/changelog.d/13761.misc b/changelog.d/13761.misc new file mode 100644 index 0000000000..f7aa8c459a --- /dev/null +++ b/changelog.d/13761.misc @@ -0,0 +1 @@ +Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 8e54ef84b2..4a75eb6b21 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -32,6 +32,7 @@ from synapse.appservice import ApplicationService from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest from synapse.logging.opentracing import ( + SynapseTags, active_span, force_tracing, start_active_span, @@ -161,6 +162,12 @@ class Auth: parent_span.set_tag( "authenticated_entity", requester.authenticated_entity ) + # We tag the Synapse instance name so that it's an easy jumping + # off point into the logs. Can also be used to filter for an + # instance that is under load. + parent_span.set_tag( + SynapseTags.INSTANCE_NAME, self.hs.get_instance_name() + ) parent_span.set_tag("user_id", requester.user.to_string()) if requester.device_id is not None: parent_span.set_tag("device_id", requester.device_id) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index adf3f54770..ca2735dd6d 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -298,6 +298,8 @@ class SynapseTags: # Whether the sync response has new data to be returned to the client. SYNC_RESULT = "sync.new_data" + INSTANCE_NAME = "instance_name" + # incoming HTTP request ID (as written in the logs) REQUEST_ID = "request_id" @@ -1043,11 +1045,11 @@ def trace_servlet( # with JsonResource). 
scope.span.set_operation_name(request.request_metrics.name) - # set the tags *after* the servlet completes, in case it decided to - # prioritise the span (tags will get dropped on unprioritised spans) request_tags[ SynapseTags.REQUEST_TAG ] = request.request_metrics.start_context.tag + # set the tags *after* the servlet completes, in case it decided to + # prioritise the span (tags will get dropped on unprioritised spans) for k, v in request_tags.items(): scope.span.set_tag(k, v) -- cgit 1.4.1 From 4c4889cac0e6f7df4689287b9fddea1bf8b15b7f Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 9 Sep 2022 19:00:21 +0100 Subject: Concurrently collect room unread counts for push badges (#13765) Most of the time this function is heavily cached, but when that isn't the case fetching the counts room by room slows down push delivery on users with many (thousands) of rooms. Signed off by Nick @ Beeper. --- changelog.d/13765.misc | 1 + synapse/push/push_tools.py | 13 ++++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13765.misc diff --git a/changelog.d/13765.misc b/changelog.d/13765.misc new file mode 100644 index 0000000000..fdda5cf3b6 --- /dev/null +++ b/changelog.d/13765.misc @@ -0,0 +1 @@ +Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 6661887d9f..658bf373b7 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -17,6 +17,7 @@ from synapse.events import EventBase from synapse.push.presentable_names import calculate_room_name, name_from_member_event from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore +from synapse.util.async_helpers import concurrently_execute async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int: @@ -25,13 +26,19 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - badge = len(invites) - for room_id in joins: - notifs = await ( - store.get_unread_event_push_actions_by_room_for_user( + room_notifs = [] + + async def get_room_unread_count(room_id: str) -> None: + room_notifs.append( + await store.get_unread_event_push_actions_by_room_for_user( room_id, user_id, ) ) + + await concurrently_execute(get_room_unread_count, joins, 10) + + for notifs in room_notifs: if notifs.notify_count == 0: continue -- cgit 1.4.1 From ebfeac7c5ded851a2639911ec6adf9d0fcdb029a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 12 Sep 2022 11:03:42 +0100 Subject: Check if Rust lib needs rebuilding. (#13759) This protects against the common mistake of failing to remember to rebuild Rust code after making changes. --- changelog.d/13759.misc | 1 + rust/Cargo.toml | 4 ++ rust/build.rs | 45 ++++++++++++++++++++++ rust/src/lib.rs | 10 ++++- stubs/synapse/synapse_rust.pyi | 1 + synapse/__init__.py | 5 +++ synapse/util/rust.py | 84 ++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13759.misc create mode 100644 rust/build.rs create mode 100644 synapse/util/rust.py diff --git a/changelog.d/13759.misc b/changelog.d/13759.misc new file mode 100644 index 0000000000..f91c512483 --- /dev/null +++ b/changelog.d/13759.misc @@ -0,0 +1 @@ +Add a check for editable installs if the Rust library needs rebuilding. 
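The mechanism in outline: at build time `build.rs` hashes everything under `rust/src` with BLAKE2b-512 and bakes the hex digest into the compiled module via the `SYNAPSE_RUST_DIGEST` environment variable; at import time an editable install re-hashes the checkout's `rust/src` (Python's `hashlib.blake2b` also defaults to a 512-bit digest, so the two hex strings are directly comparable) and raises if they differ. A simplified sketch of the check implemented in `synapse/util/rust.py` below; the real function additionally locates the source checkout and returns early for non-editable installs:

    # Simplified outline of check_rust_lib_up_to_date(); not the full function.
    built_digest = get_rust_file_digest()  # baked in by build.rs at compile time
    source_digest = _hash_rust_files_in_directory("rust/src")
    if built_digest != source_digest:
        raise Exception("Rust module outdated. Please rebuild using `poetry install`")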
diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 0a9760cafc..deddf3cec2 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -19,3 +19,7 @@ name = "synapse.synapse_rust" [dependencies] pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] } + +[build-dependencies] +blake2 = "0.10.4" +hex = "0.4.3" diff --git a/rust/build.rs b/rust/build.rs new file mode 100644 index 0000000000..2117975e56 --- /dev/null +++ b/rust/build.rs @@ -0,0 +1,45 @@ +//! This build script calculates the hash of all files in the `src/` +//! directory and adds it as an environment variable during build time. +//! +//! This is used so that the python code can detect when the built native module +//! does not match the source in-tree, helping to detect the case where the +//! source has been updated but the library hasn't been rebuilt. + +use std::path::PathBuf; + +use blake2::{Blake2b512, Digest}; + +fn main() -> Result<(), std::io::Error> { + let mut dirs = vec![PathBuf::from("src")]; + + let mut paths = Vec::new(); + while let Some(path) = dirs.pop() { + let mut entries = std::fs::read_dir(path)? + .map(|res| res.map(|e| e.path())) + .collect::, std::io::Error>>()?; + + entries.sort(); + + for entry in entries { + if entry.is_dir() { + dirs.push(entry) + } else { + paths.push(entry.to_str().expect("valid rust paths").to_string()); + } + } + } + + paths.sort(); + + let mut hasher = Blake2b512::new(); + + for path in paths { + let bytes = std::fs::read(path)?; + hasher.update(bytes); + } + + let hex_digest = hex::encode(hasher.finalize()); + println!("cargo:rustc-env=SYNAPSE_RUST_DIGEST={hex_digest}"); + + Ok(()) +} diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 142fc2ed93..ba42465fb8 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -1,5 +1,13 @@ use pyo3::prelude::*; +/// Returns the hash of all the rust source files at the time it was compiled. +/// +/// Used by python to detect if the rust library is outdated. +#[pyfunction] +fn get_rust_file_digest() -> &'static str { + env!("SYNAPSE_RUST_DIGEST") +} + /// Formats the sum of two numbers as string. #[pyfunction] #[pyo3(text_signature = "(a, b, /)")] @@ -11,6 +19,6 @@ fn sum_as_string(a: usize, b: usize) -> PyResult { #[pymodule] fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(sum_as_string, m)?)?; - + m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?; Ok(()) } diff --git a/stubs/synapse/synapse_rust.pyi b/stubs/synapse/synapse_rust.pyi index 5b51ba05d7..8658d3138f 100644 --- a/stubs/synapse/synapse_rust.pyi +++ b/stubs/synapse/synapse_rust.pyi @@ -1 +1,2 @@ def sum_as_string(a: int, b: int) -> str: ... +def get_rust_file_digest() -> str: ... diff --git a/synapse/__init__.py b/synapse/__init__.py index b1369aca8f..1bed6393bd 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -20,6 +20,8 @@ import json import os import sys +from synapse.util.rust import check_rust_lib_up_to_date + # Check that we're not running on an unsupported Python version. if sys.version_info < (3, 7): print("Synapse requires Python 3.7 or above.") @@ -78,3 +80,6 @@ if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): from synapse.util.patch_inline_callbacks import do_patch do_patch() + + +check_rust_lib_up_to_date() diff --git a/synapse/util/rust.py b/synapse/util/rust.py new file mode 100644 index 0000000000..30ecb9ffd9 --- /dev/null +++ b/synapse/util/rust.py @@ -0,0 +1,84 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +from hashlib import blake2b + +import synapse +from synapse.synapse_rust import get_rust_file_digest + + +def check_rust_lib_up_to_date() -> None: + """For editable installs check if the rust library is outdated and needs to + be rebuilt. + """ + + if not _dist_is_editable(): + return + + synapse_dir = os.path.dirname(synapse.__file__) + synapse_root = os.path.abspath(os.path.join(synapse_dir, "..")) + + # Double check we've not gone into site-packages... + if os.path.basename(synapse_root) == "site-packages": + return + + # ... and it looks like the root of a python project. + if not os.path.exists("pyproject.toml"): + return + + # Get the hash of all Rust source files + hash = _hash_rust_files_in_directory(os.path.join(synapse_root, "rust", "src")) + + if hash != get_rust_file_digest(): + raise Exception("Rust module outdated. Please rebuild using `poetry install`") + + +def _hash_rust_files_in_directory(directory: str) -> str: + """Get the hash of all files in a directory (recursively)""" + + directory = os.path.abspath(directory) + + paths = [] + + dirs = [directory] + while dirs: + dir = dirs.pop() + with os.scandir(dir) as d: + for entry in d: + if entry.is_dir(): + dirs.append(entry.path) + else: + paths.append(entry.path) + + # We sort to make sure that we get a consistent and well-defined ordering. + paths.sort() + + hasher = blake2b() + + for path in paths: + with open(os.path.join(directory, path), "rb") as f: + hasher.update(f.read()) + + return hasher.hexdigest() + + +def _dist_is_editable() -> bool: + """Is distribution an editable install?""" + for path_item in sys.path: + egg_link = os.path.join(path_item, "matrix-synapse.egg-link") + if os.path.isfile(egg_link): + return True + return False -- cgit 1.4.1 From da41a7cd618d11b05c2c04c39068fd4b1e1b7894 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Mon, 12 Sep 2022 12:58:33 +0100 Subject: Remove check current state membership up to date (#13745) * Remove checks for membership column in current_state_events * Add schema script to force through the `current_state_events_membership` background job Contributed by Nick @ Beeper (@fizzadar). --- changelog.d/13745.misc | 1 + synapse/storage/databases/main/roommember.py | 202 +++++---------------- ...force_update_current_state_events_membership.py | 52 ++++++ 3 files changed, 100 insertions(+), 155 deletions(-) create mode 100644 changelog.d/13745.misc create mode 100644 synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py diff --git a/changelog.d/13745.misc b/changelog.d/13745.misc new file mode 100644 index 0000000000..e97a789c0e --- /dev/null +++ b/changelog.d/13745.misc @@ -0,0 +1 @@ +Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). 
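The simplification in the next file relies on an invariant: once the `current_state_events_membership` background update has run, every `m.room.member` row in `current_state_events` has its `membership` column populated, so the fallback queries that joined through `room_memberships` (and the startup check that chose between the two query forms) can be deleted. The delta script at the end of this patch forces the update through synchronously at upgrade time if it is still pending, which, as its comment notes, may take a while. A hypothetical way for an operator to see in advance whether that synchronous step has any work left to do:

    # Hypothetical pre-upgrade check, run against the main database; the table
    # and update name are taken from the delta script in this patch.
    txn.execute(
        "SELECT 1 FROM background_updates"
        " WHERE update_name = 'current_state_events_membership'"
    )
    update_still_pending = txn.fetchone() is not None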
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 6e1ff5626b..fdb4684e12 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -32,10 +32,7 @@ import attr from synapse.api.constants import EventTypes, Membership from synapse.metrics import LaterGauge -from synapse.metrics.background_process_metrics import ( - run_as_background_process, - wrap_as_background_process, -) +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -91,16 +88,6 @@ class RoomMemberWorkerStore(EventsWorkerStore): # at a time. Keyed by room_id. self._joined_host_linearizer = Linearizer("_JoinedHostsCache") - # Is the current_state_events.membership up to date? Or is the - # background update still running? - self._current_state_events_membership_up_to_date = False - - txn = db_conn.cursor( - txn_name="_check_safe_current_state_events_membership_updated" - ) - self._check_safe_current_state_events_membership_updated_txn(txn) - txn.close() - if ( self.hs.config.worker.run_background_tasks and self.hs.config.metrics.metrics_flags.known_servers @@ -157,34 +144,6 @@ class RoomMemberWorkerStore(EventsWorkerStore): self._known_servers_count = max([count, 1]) return self._known_servers_count - def _check_safe_current_state_events_membership_updated_txn( - self, txn: LoggingTransaction - ) -> None: - """Checks if it is safe to assume the new current_state_events - membership column is up to date - """ - - pending_update = self.db_pool.simple_select_one_txn( - txn, - table="background_updates", - keyvalues={"update_name": _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME}, - retcols=["update_name"], - allow_none=True, - ) - - self._current_state_events_membership_up_to_date = not pending_update - - # If the update is still running, reschedule to run. - if pending_update: - self._clock.call_later( - 15.0, - run_as_background_process, - "_check_safe_current_state_events_membership_updated", - self.db_pool.runInteraction, - "_check_safe_current_state_events_membership_updated", - self._check_safe_current_state_events_membership_updated_txn, - ) - @cached(max_entries=100000, iterable=True) async def get_users_in_room(self, room_id: str) -> List[str]: """ @@ -212,31 +171,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): `get_current_hosts_in_room()` and so we can re-use the cache but it's not horrible to have here either. """ - # If we can assume current_state_events.membership is up to date - # then we can avoid a join, which is a Very Good Thing given how - # frequently this function gets called. - if self._current_state_events_membership_up_to_date: - sql = """ - SELECT c.state_key FROM current_state_events as c - /* Get the depth of the event from the events table */ - INNER JOIN events AS e USING (event_id) - WHERE c.type = 'm.room.member' AND c.room_id = ? AND membership = ? - /* Sorted by lowest depth first */ - ORDER BY e.depth ASC; - """ - else: - sql = """ - SELECT c.state_key FROM room_memberships as m - /* Get the depth of the event from the events table */ - INNER JOIN events AS e USING (event_id) - INNER JOIN current_state_events as c - ON m.event_id = c.event_id - AND m.room_id = c.room_id - AND m.user_id = c.state_key - WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? 
- /* Sorted by lowest depth first */ - ORDER BY e.depth ASC; - """ + sql = """ + SELECT c.state_key FROM current_state_events as c + /* Get the depth of the event from the events table */ + INNER JOIN events AS e USING (event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? AND membership = ? + /* Sorted by lowest depth first */ + ORDER BY e.depth ASC; + """ txn.execute(sql, (room_id, Membership.JOIN)) return [r[0] for r in txn] @@ -353,28 +295,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): # We do this all in one transaction to keep the cache small. # FIXME: get rid of this when we have room_stats - # If we can assume current_state_events.membership is up to date - # then we can avoid a join, which is a Very Good Thing given how - # frequently this function gets called. - if self._current_state_events_membership_up_to_date: - # Note, rejected events will have a null membership field, so - # we we manually filter them out. - sql = """ - SELECT count(*), membership FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? - AND membership IS NOT NULL - GROUP BY membership - """ - else: - sql = """ - SELECT count(*), m.membership FROM room_memberships as m - INNER JOIN current_state_events as c - ON m.event_id = c.event_id - AND m.room_id = c.room_id - AND m.user_id = c.state_key - WHERE c.type = 'm.room.member' AND c.room_id = ? - GROUP BY m.membership - """ + # Note, rejected events will have a null membership field, so + # we we manually filter them out. + sql = """ + SELECT count(*), membership FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? + AND membership IS NOT NULL + GROUP BY membership + """ txn.execute(sql, (room_id,)) res: Dict[str, MemberSummary] = {} @@ -383,30 +311,18 @@ class RoomMemberWorkerStore(EventsWorkerStore): # we order by membership and then fairly arbitrarily by event_id so # heroes are consistent - if self._current_state_events_membership_up_to_date: - # Note, rejected events will have a null membership field, so - # we we manually filter them out. - sql = """ - SELECT state_key, membership, event_id - FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? - AND membership IS NOT NULL - ORDER BY - CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, - event_id ASC - LIMIT ? - """ - else: - sql = """ - SELECT c.state_key, m.membership, c.event_id - FROM room_memberships as m - INNER JOIN current_state_events as c USING (room_id, event_id) - WHERE c.type = 'm.room.member' AND c.room_id = ? - ORDER BY - CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, - c.event_id ASC - LIMIT ? - """ + # Note, rejected events will have a null membership field, so + # we we manually filter them out. + sql = """ + SELECT state_key, membership, event_id + FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? + AND membership IS NOT NULL + ORDER BY + CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, + event_id ASC + LIMIT ? + """ # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) @@ -649,27 +565,15 @@ class RoomMemberWorkerStore(EventsWorkerStore): # We use `current_state_events` here and not `local_current_membership` # as a) this gets called with remote users and b) this only gets called # for rooms the server is participating in. 
- if self._current_state_events_membership_up_to_date: - sql = """ - SELECT room_id, e.instance_name, e.stream_ordering - FROM current_state_events AS c - INNER JOIN events AS e USING (room_id, event_id) - WHERE - c.type = 'm.room.member' - AND c.state_key = ? - AND c.membership = ? - """ - else: - sql = """ - SELECT room_id, e.instance_name, e.stream_ordering - FROM current_state_events AS c - INNER JOIN room_memberships AS m USING (room_id, event_id) - INNER JOIN events AS e USING (room_id, event_id) - WHERE - c.type = 'm.room.member' - AND c.state_key = ? - AND m.membership = ? - """ + sql = """ + SELECT room_id, e.instance_name, e.stream_ordering + FROM current_state_events AS c + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND c.state_key = ? + AND c.membership = ? + """ txn.execute(sql, (user_id, Membership.JOIN)) return frozenset( @@ -707,27 +611,15 @@ class RoomMemberWorkerStore(EventsWorkerStore): user_ids, ) - if self._current_state_events_membership_up_to_date: - sql = f""" - SELECT c.state_key, room_id, e.instance_name, e.stream_ordering - FROM current_state_events AS c - INNER JOIN events AS e USING (room_id, event_id) - WHERE - c.type = 'm.room.member' - AND c.membership = ? - AND {clause} - """ - else: - sql = f""" - SELECT c.state_key, room_id, e.instance_name, e.stream_ordering - FROM current_state_events AS c - INNER JOIN room_memberships AS m USING (room_id, event_id) - INNER JOIN events AS e USING (room_id, event_id) - WHERE - c.type = 'm.room.member' - AND m.membership = ? - AND {clause} - """ + sql = f""" + SELECT c.state_key, room_id, e.instance_name, e.stream_ordering + FROM current_state_events AS c + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND c.membership = ? + AND {clause} + """ txn.execute(sql, [Membership.JOIN] + args) diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py new file mode 100644 index 0000000000..b5853d125c --- /dev/null +++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py @@ -0,0 +1,52 @@ +# Copyright 2022 Beeper +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Forces through the `current_state_events_membership` background job so checks +for its completion can be removed. + +Note the background job must still remain defined in the database class. +""" + + +def run_upgrade(cur, database_engine, *args, **kwargs): + cur.execute("SELECT update_name FROM background_updates") + rows = cur.fetchall() + for row in rows: + if row[0] == "current_state_events_membership": + break + # No pending background job so nothing to do here + else: + return + + # Populate membership field for all current_state_events, this may take + # a while but was originally handled via a background update in 2019. 
+ cur.execute( + """ + UPDATE current_state_events + SET membership = ( + SELECT membership FROM room_memberships + WHERE event_id = current_state_events.event_id + ) + """ + ) + + # Finally, delete the background job because we've handled it above + cur.execute( + """ + DELETE FROM background_updates + WHERE update_name = 'current_state_events_membership' + """ + ) -- cgit 1.4.1 From dd7484b5628dc297d0324198c9f5765247f982b5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 12 Sep 2022 13:26:33 +0100 Subject: Fix CI on non-PR builds (#13769) Mark cargo-test as skippable since it only runs on Rust code change. --- .github/workflows/tests.yml | 1 + changelog.d/13769.misc | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/13769.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7c4ae3d7ff..b4dd74ea4f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -488,3 +488,4 @@ jobs: # The newsfile lint may be skipped on non PR builds skippable: lint-newsfile + cargo-test diff --git a/changelog.d/13769.misc b/changelog.d/13769.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/13769.misc @@ -0,0 +1 @@ +Add a stub Rust crate. -- cgit 1.4.1 From 75713374454ef36ec549018344996e06a67dd2bf Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 12 Sep 2022 15:11:18 +0200 Subject: Fix typo in ratelimiting documentation (#13727) --- changelog.d/13727.doc | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13727.doc diff --git a/changelog.d/13727.doc b/changelog.d/13727.doc new file mode 100644 index 0000000000..ba530b409d --- /dev/null +++ b/changelog.d/13727.doc @@ -0,0 +1 @@ +Fix a typo in the documentation for the login ratelimiting configuration. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index ae490d13a4..cd546041b2 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1393,7 +1393,7 @@ This option specifies several limits for login: client is attempting to log into. Defaults to `per_second: 0.17`, `burst_count: 3`. -* `failted_attempts` ratelimits login requests based on the account the +* `failed_attempts` ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`. -- cgit 1.4.1 From fa2f3d8d0c915c6ebd84ef53526ac04c91fd4a19 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 12 Sep 2022 19:31:23 +0200 Subject: Fix GHA skippable syntax (#13778) Signed-off-by: Mathieu Velten --- .github/workflows/tests.yml | 3 ++- changelog.d/13778.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13778.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b4dd74ea4f..bf70f8373e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -486,6 +486,7 @@ jobs: needs: ${{ toJSON(needs) }} # The newsfile lint may be skipped on non PR builds - skippable: + # Cargo test is skipped if there is no changes on Rust code + skippable: | lint-newsfile cargo-test diff --git a/changelog.d/13778.misc b/changelog.d/13778.misc new file mode 100644 index 0000000000..2e0dd68a0f --- /dev/null +++ b/changelog.d/13778.misc @@ -0,0 +1 @@ +Add a stub Rust crate. 
-- cgit 1.4.1 From cdbb6412327b542e0dead792717fe58253291131 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 13 Sep 2022 08:16:37 +0100 Subject: Add receipts event stream ordering (#13703) --- changelog.d/13703.misc | 1 + synapse/_scripts/synapse_port_db.py | 2 + synapse/storage/databases/main/receipts.py | 74 +++++++++++++++++++++- .../delta/72/05receipts_event_stream_ordering.sql | 19 ++++++ 4 files changed, 95 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13703.misc create mode 100644 synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql diff --git a/changelog.d/13703.misc b/changelog.d/13703.misc new file mode 100644 index 0000000000..685a29b17d --- /dev/null +++ b/changelog.d/13703.misc @@ -0,0 +1 @@ +Add & populate `event_stream_ordering` column on receipts table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 543bba27c2..30983c47fb 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -67,6 +67,7 @@ from synapse.storage.databases.main.media_repository import ( ) from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore from synapse.storage.databases.main.pusher import PusherWorkerStore +from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore from synapse.storage.databases.main.registration import ( RegistrationBackgroundUpdateStore, find_max_generated_user_id_localpart, @@ -203,6 +204,7 @@ class Store( PushRuleStore, PusherWorkerStore, PresenceBackgroundUpdateStore, + ReceiptsBackgroundUpdateStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3838409519..719a12b0ae 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -675,6 +675,7 @@ class ReceiptsWorkerStore(SQLBaseStore): values={ "stream_id": stream_id, "event_id": event_id, + "event_stream_ordering": stream_ordering, "data": json_encoder.encode(data), }, # receipts_linearized has a unique constraint on @@ -830,5 +831,76 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -class ReceiptsStore(ReceiptsWorkerStore): +class ReceiptsBackgroundUpdateStore(SQLBaseStore): + POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering" + + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_update_handler( + self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING, + self._populate_receipt_event_stream_ordering, + ) + + async def _populate_receipt_event_stream_ordering( + self, progress: JsonDict, batch_size: int + ) -> int: + def _populate_receipt_event_stream_ordering_txn( + txn: LoggingTransaction, + ) -> bool: + + if "max_stream_id" in progress: + max_stream_id = progress["max_stream_id"] + else: + txn.execute("SELECT max(stream_id) FROM receipts_linearized") + res = txn.fetchone() + if res is None or res[0] is None: + return True + else: + max_stream_id = res[0] + + start = progress.get("stream_id", 0) + stop = start + batch_size + + sql = """ + UPDATE receipts_linearized + SET event_stream_ordering = ( + SELECT stream_ordering + FROM events + WHERE event_id = 
receipts_linearized.event_id + ) + WHERE stream_id >= ? AND stream_id < ? + """ + txn.execute(sql, (start, stop)) + + self.db_pool.updates._background_update_progress_txn( + txn, + self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING, + { + "stream_id": stop, + "max_stream_id": max_stream_id, + }, + ) + + return stop > max_stream_id + + finished = await self.db_pool.runInteraction( + "_remove_devices_from_device_inbox_txn", + _populate_receipt_event_stream_ordering_txn, + ) + + if finished: + await self.db_pool.updates._end_background_update( + self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING + ) + + return batch_size + + +class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore): pass diff --git a/synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql b/synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql new file mode 100644 index 0000000000..2a822f4509 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql @@ -0,0 +1,19 @@ +/* Copyright 2022 Beeper + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE receipts_linearized ADD COLUMN event_stream_ordering BIGINT; + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_event_stream_ordering', '{}'); -- cgit 1.4.1 From 4b678b20a2f55d6c11ed1acfd8f517d15ef49c6e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Sep 2022 09:20:28 +0100 Subject: 1.67.0 --- CHANGES.md | 10 ++++++++-- debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 0300fcbd30..244dc6b0d6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,5 @@ -Synapse 1.67.0rc1 (2022-09-06) -============================== +Synapse 1.67.0 (2022-09-13) +=========================== This release removes using the deprecated direct TCP replication configuration for workers. Server admins should use Redis instead. See the [upgrade @@ -19,6 +19,12 @@ database will require SQLite version 3.27.0 or higher. (The [current minimum See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). for more details. +No significant changes since 1.67.0rc1. + + +Synapse 1.67.0rc1 (2022-09-06) +============================== + Features -------- diff --git a/debian/changelog b/debian/changelog index 4fa54dc5ee..77c8f7ad0b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.67.0) stable; urgency=medium + + * New Synapse release 1.67.0. 
+ + -- Synapse Packaging team Tue, 13 Sep 2022 09:19:56 +0100 + matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium [ Erik Johnston ] diff --git a/pyproject.toml b/pyproject.toml index dfb5c788be..97be0f15eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.67.0rc1" +version = "1.67.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.4.1 From 80bb098d8775cc2ad1bf5abd150913577e643481 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Sep 2022 09:55:10 +0100 Subject: Fixup changelog --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 244dc6b0d6..be44903bfe 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,15 +8,15 @@ notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v167 The minimum version of `poetry` supported for managing source checkouts is now 1.2.0. -**Notice:** from the next major release (v1.68.0) installing Synapse from a source +**Notice:** from the next major release (1.68.0) installing Synapse from a source checkout will require a recent Rust compiler. Those using packages or `pip install matrix-synapse` will not be affected. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). -**Notice:** from the next major release (v1.68.0), running Synapse with a SQLite +**Notice:** from the next major release (1.68.0), running Synapse with a SQLite database will require SQLite version 3.27.0 or higher. (The [current minimum version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).) -See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670). for more details. +See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details. No significant changes since 1.67.0rc1. -- cgit 1.4.1 From 41df25bbbd29caeb539269a436fbe6bc57fad93c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 13 Sep 2022 10:01:21 +0100 Subject: installation.md: require libpq on M1 macs (#13480) --- changelog.d/13480.doc | 1 + docs/setup/installation.md | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13480.doc diff --git a/changelog.d/13480.doc b/changelog.d/13480.doc new file mode 100644 index 0000000000..ae5df16367 --- /dev/null +++ b/changelog.d/13480.doc @@ -0,0 +1 @@ +Note that `libpq` is required on ARM-based Macs. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 90737520ba..96833effc6 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -303,9 +303,10 @@ You may need to install the latest Xcode developer tools: xcode-select --install ``` -On ARM-based Macs you may need to explicitly install libjpeg which is a pillow dependency. You can use Homebrew (https://brew.sh): +On ARM-based Macs you may need to install libjpeg and libpq. 
+You can use Homebrew (https://brew.sh): ```sh - brew install jpeg + brew install jpeg libpq ``` On macOS Catalina (10.15) you may need to explicitly install OpenSSL -- cgit 1.4.1 From 540afb0bfcc407328a75b0b9910f26b407dbcaed Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 13 Sep 2022 10:17:23 +0100 Subject: Simplify CI tests DAG (#13784) * Simplify CI tests DAG * Changelog --- .github/workflows/tests.yml | 4 ---- changelog.d/13784.misc | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/13784.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bf70f8373e..a5a217d015 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -468,10 +468,6 @@ jobs: tests-done: if: ${{ always() }} needs: - - check-sampleconfig - - lint - - lint-crlf - - lint-newsfile - trial - trial-olddeps - sytest diff --git a/changelog.d/13784.misc b/changelog.d/13784.misc new file mode 100644 index 0000000000..e7a542cd80 --- /dev/null +++ b/changelog.d/13784.misc @@ -0,0 +1 @@ +Simplify the dependency DAG in the tests workflow. -- cgit 1.4.1 From b60d47ab2c55580fc1941497964cd33c27838231 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 13 Sep 2022 10:53:11 +0100 Subject: Updates to the schema dump script (#13770) --- changelog.d/13770.misc | 1 + scripts-dev/make_full_schema.sh | 48 ++++++++-------------- .../storage/schema/state/delta/30/state_stream.sql | 4 ++ 3 files changed, 21 insertions(+), 32 deletions(-) create mode 100644 changelog.d/13770.misc diff --git a/changelog.d/13770.misc b/changelog.d/13770.misc new file mode 100644 index 0000000000..36ac91400a --- /dev/null +++ b/changelog.d/13770.misc @@ -0,0 +1 @@ +Update the script which makes full schema dumps. diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index f0e22d4ca2..61394360ce 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -9,8 +9,10 @@ export PGHOST="localhost" POSTGRES_DB_NAME="synapse_full_schema.$$" -SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite" -POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres" +SQLITE_SCHEMA_FILE="schema.sql.sqlite" +SQLITE_ROWS_FILE="rows.sql.sqlite" +POSTGRES_SCHEMA_FILE="full.sql.postgres" +POSTGRES_ROWS_FILE="rows.sql.postgres" REQUIRED_DEPS=("matrix-synapse" "psycopg2") @@ -22,7 +24,7 @@ usage() { echo " Username to connect to local postgres instance. The password will be requested" echo " during script execution." echo "-c" - echo " CI mode. Enables coverage tracking and prints every command that the script runs." + echo " CI mode. Prints every command that the script runs." echo "-o " echo " Directory to output full schema files to." echo "-h" @@ -37,11 +39,6 @@ while getopts "p:co:h" opt; do c) # Print all commands that are being executed set -x - - # Modify required dependencies for coverage - REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess") - - COVERAGE=1 ;; o) command -v realpath > /dev/null || (echo "The -o flag requires the 'realpath' binary to be installed" && exit 1) @@ -102,6 +99,7 @@ SQLITE_DB=$TMPDIR/homeserver.db POSTGRES_CONFIG=$TMPDIR/postgres.conf # Ensure these files are delete on script exit +# TODO: the trap should also drop the temp postgres DB trap 'rm -rf $TMPDIR' EXIT cat > "$SQLITE_CONFIG" < "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE" +echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_SCHEMA_FILE' and '$OUTPUT_DIR/$SQLITE_ROWS_FILE'..." 
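+# ".schema --indent" emits just the DDL, while ".dump --data-only --nosys" emits
+# the table rows as INSERT statements and skips SQLite's internal tables.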
+sqlite3 "$SQLITE_DB" ".schema --indent" > "$OUTPUT_DIR/$SQLITE_SCHEMA_FILE" +sqlite3 "$SQLITE_DB" ".dump --data-only --nosys" > "$OUTPUT_DIR/$SQLITE_ROWS_FILE" -echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..." -pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE" +echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE' and '$OUTPUT_DIR/$POSTGRES_ROWS_FILE'..." +pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE" +pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_ROWS_FILE" echo "Cleaning up temporary Postgres database..." dropdb $POSTGRES_DB_NAME diff --git a/synapse/storage/schema/state/delta/30/state_stream.sql b/synapse/storage/schema/state/delta/30/state_stream.sql index e85699e82e..bdaf8b02d5 100644 --- a/synapse/storage/schema/state/delta/30/state_stream.sql +++ b/synapse/storage/schema/state/delta/30/state_stream.sql @@ -26,6 +26,10 @@ * (event, state) pair, we can use that stream_ordering to identify when * the new state was assigned for the event. */ + +/* NB: This table belongs to the `main` logical database; it should not be present + * in `state`. + */ CREATE TABLE IF NOT EXISTS ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, -- cgit 1.4.1 From 12dacecabd27680dc77c17724953ecda0801b5ea Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 13 Sep 2022 16:14:28 +0200 Subject: Make sequence `cache_invalidation_stream_seq` begin at `2` (#13766) Signed-off-by: Mathieu Velten Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/13766.bugfix | 1 + synapse/storage/schema/__init__.py | 1 + ...8begin_cache_invalidation_seq_at_2.sql.postgres | 23 ++++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 changelog.d/13766.bugfix create mode 100644 synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres diff --git a/changelog.d/13766.bugfix b/changelog.d/13766.bugfix new file mode 100644 index 0000000000..c708e54f9c --- /dev/null +++ b/changelog.d/13766.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the `cache_invalidation_stream_seq` sequence would begin at 1 instead of 2. diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 256f745dc0..32cda5e3ba 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -76,6 +76,7 @@ Changes in SCHEMA_VERSION = 72: - event_edges.(room_id, is_state) are no longer written to. - Tables related to groups are dropped. - Unused column application_services_state.last_txn is dropped + - Cache invalidation stream id sequence now begins at 2 to match code expectation. 
""" diff --git a/synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres b/synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres new file mode 100644 index 0000000000..69931fe971 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres @@ -0,0 +1,23 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- The sequence needs to begin at 2 because a bunch of code assumes that +-- get_next_id_txn will return values >= 2, cf this comment: +-- https://github.com/matrix-org/synapse/blob/b93bd95e8ab64d27ae26841020f62ee61272a5f2/synapse/storage/util/id_generators.py#L344 + +SELECT setval('cache_invalidation_stream_seq', ( + SELECT COALESCE(MAX(last_value), 1) FROM cache_invalidation_stream_seq +)); -- cgit 1.4.1 From 21687ec189f404bcee98ae61b008afc8c5094400 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 14 Sep 2022 08:28:12 +0000 Subject: Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. (#13789) * Don't accept a trailing slash on the end of /get_missing_events * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/13789.bugfix | 1 + synapse/federation/transport/server/federation.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13789.bugfix diff --git a/changelog.d/13789.bugfix b/changelog.d/13789.bugfix new file mode 100644 index 0000000000..9e1e3e0fa7 --- /dev/null +++ b/changelog.d/13789.bugfix @@ -0,0 +1 @@ +Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. \ No newline at end of file diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index f7884bfbe0..6bb4659c4c 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -549,8 +549,7 @@ class FederationClientKeysClaimServlet(BaseFederationServerServlet): class FederationGetMissingEventsServlet(BaseFederationServerServlet): - # TODO(paul): Why does this path alone end with "/?" optional? - PATH = "/get_missing_events/(?P[^/]*)/?" + PATH = "/get_missing_events/(?P[^/]*)" async def on_POST( self, -- cgit 1.4.1 From c73774467edb04c372caecb9e843542654f7610b Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 14 Sep 2022 10:42:57 +0100 Subject: Fix bug in device list caching when remote users leave rooms (#13749) When a remote user leaves the last room shared with the homeserver, we have to mark their device list as unsubscribed, otherwise we would hold on to a stale device list in our cache. 
Crucially, the device list would remain cached even after the remote user rejoined the room, which could lead to E2EE failures until the next change to the remote user's device list. Fixes #13651. Signed-off-by: Sean Quah --- changelog.d/13749.bugfix | 1 + synapse/handlers/device.py | 11 ----------- synapse/handlers/e2e_keys.py | 26 ++++++++++++++++++++++++++ synapse/storage/controllers/persist_events.py | 20 +++++++++++++++++--- tests/handlers/test_e2e_keys.py | 8 +++++++- 5 files changed, 51 insertions(+), 15 deletions(-) create mode 100644 changelog.d/13749.bugfix diff --git a/changelog.d/13749.bugfix b/changelog.d/13749.bugfix new file mode 100644 index 0000000000..8ffafec07b --- /dev/null +++ b/changelog.d/13749.bugfix @@ -0,0 +1 @@ +Fix a long standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index c5ac169644..901e2310b7 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -45,7 +45,6 @@ from synapse.types import ( JsonDict, StreamKeyType, StreamToken, - UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, ) @@ -324,8 +323,6 @@ class DeviceHandler(DeviceWorkerHandler): self.device_list_updater.incoming_device_list_update, ) - hs.get_distributor().observe("user_left_room", self.user_left_room) - # Whether `_handle_new_device_update_async` is currently processing. self._handle_new_device_update_is_processing = False @@ -569,14 +566,6 @@ class DeviceHandler(DeviceWorkerHandler): StreamKeyType.DEVICE_LIST, position, users=[from_user_id] ) - async def user_left_room(self, user: UserID, room_id: str) -> None: - user_id = user.to_string() - room_ids = await self.store.get_rooms_for_user(user_id) - if not room_ids: - # We no longer share rooms with this user, so we'll no longer - # receive device updates. Mark this in DB. - await self.store.mark_remote_user_device_list_as_unsubscribed(user_id) - async def store_dehydrated_device( self, user_id: str, diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index ec81639c78..8eed63ccf3 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -175,6 +175,32 @@ class E2eKeysHandler: user_ids_not_in_cache, remote_results, ) = await self.store.get_user_devices_from_cache(query_list) + + # Check that the homeserver still shares a room with all cached users. + # Note that this check may be slightly racy when a remote user leaves a + # room after we have fetched their cached device list. In the worst case + # we will do extra federation queries for devices that we had cached. + cached_users = set(remote_results.keys()) + valid_cached_users = ( + await self.store.get_users_server_still_shares_room_with( + remote_results.keys() + ) + ) + invalid_cached_users = cached_users - valid_cached_users + if invalid_cached_users: + # Fix up results. If we get here, there is either a bug in device + # list tracking, or we hit the race mentioned above. + user_ids_not_in_cache.update(invalid_cached_users) + for invalid_user_id in invalid_cached_users: + remote_results.pop(invalid_user_id) + # This log message may be removed if it turns out it's almost + # entirely triggered by races. + logger.error( + "Devices for %s were cached, but the server no longer shares " + "any rooms with them. 
The cached device lists are stale.", + invalid_cached_users, + ) + for user_id, devices in remote_results.items(): user_devices = results.setdefault(user_id, {}) for device_id, device in devices.items(): diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index dad3731b9b..501dbbc990 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -598,9 +598,9 @@ class EventsPersistenceStorageController: # room state_delta_for_room: Dict[str, DeltaState] = {} - # Set of remote users which were in rooms the server has left. We - # should check if we still share any rooms and if not we mark their - # device lists as stale. + # Set of remote users which were in rooms the server has left or who may + # have left rooms the server is in. We should check if we still share any + # rooms and if not we mark their device lists as stale. potentially_left_users: Set[str] = set() if not backfilled: @@ -725,6 +725,20 @@ class EventsPersistenceStorageController: current_state = {} delta.no_longer_in_room = True + # Add all remote users that might have left rooms. + potentially_left_users.update( + user_id + for event_type, user_id in delta.to_delete + if event_type == EventTypes.Member + and not self.is_mine_id(user_id) + ) + potentially_left_users.update( + user_id + for event_type, user_id in delta.to_insert.keys() + if event_type == EventTypes.Member + and not self.is_mine_id(user_id) + ) + state_delta_for_room[room_id] = delta await self.persist_events_store._persist_events_and_state_updates( diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 1e6ad4b663..95698bc275 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -891,6 +891,12 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): new_callable=mock.MagicMock, return_value=make_awaitable(["some_room_id"]), ) + mock_get_users = mock.patch.object( + self.store, + "get_users_server_still_shares_room_with", + new_callable=mock.MagicMock, + return_value=make_awaitable({remote_user_id}), + ) mock_request = mock.patch.object( self.hs.get_federation_client(), "query_user_devices", @@ -898,7 +904,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): return_value=make_awaitable(response_body), ) - with mock_get_rooms, mock_request as mocked_federation_request: + with mock_get_rooms, mock_get_users, mock_request as mocked_federation_request: # Make the first query and sanity check it succeeds. response_1 = self.get_success( e2e_handler.query_devices( -- cgit 1.4.1 From 51a77e990b7a59e460ab22a2788ab8c3506b9a2c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 14 Sep 2022 14:16:12 +0100 Subject: Remove incorrect migration file from `state` logical DB (#13788) * Remove incorrect migration file from `state` logical DB The table `ex_outlier_stream` is part of the `main` logical DB; it should not have been created in the `state` logical DB. We remove this migration now as a tidy-up. Note: we cannot `DROP TABLE IF EXISTS ex_outlier_stream` in a new migration, because some (most) instances of Synapse host both of these logical DBs on the same DB cluster. 
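To make the hazard concrete, this is the kind of cleanup delta the note above rules out. The following is a hypothetical sketch, not a migration in this commit: on a deployment where the `main` and `state` logical DBs share one physical database, it would drop the table that legitimately belongs to `main`.

-- Hypothetical `state` schema delta, shown only to illustrate the hazard;
-- deliberately NOT added by this commit. If `main` and `state` share a
-- physical database, this statement destroys main's ex_outlier_stream table.
DROP TABLE IF EXISTS ex_outlier_stream;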
* Changelog --- changelog.d/13788.misc | 1 + .../storage/schema/state/delta/30/state_stream.sql | 37 ---------------------- 2 files changed, 1 insertion(+), 37 deletions(-) create mode 100644 changelog.d/13788.misc delete mode 100644 synapse/storage/schema/state/delta/30/state_stream.sql diff --git a/changelog.d/13788.misc b/changelog.d/13788.misc new file mode 100644 index 0000000000..7263b1ac52 --- /dev/null +++ b/changelog.d/13788.misc @@ -0,0 +1 @@ +Remove an old, incorrect migration file. diff --git a/synapse/storage/schema/state/delta/30/state_stream.sql b/synapse/storage/schema/state/delta/30/state_stream.sql deleted file mode 100644 index bdaf8b02d5..0000000000 --- a/synapse/storage/schema/state/delta/30/state_stream.sql +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/* We used to create a table called current_state_resets, but this is no - * longer used and is removed in delta 54. - */ - -/* The outlier events that have aquired a state group typically through - * backfill. This is tracked separately to the events table, as assigning a - * state group change the position of the existing event in the stream - * ordering. - * However since a stream_ordering is assigned in persist_event for the - * (event, state) pair, we can use that stream_ordering to identify when - * the new state was assigned for the event. - */ - -/* NB: This table belongs to the `main` logical database; it should not be present - * in `state`. - */ -CREATE TABLE IF NOT EXISTS ex_outlier_stream( - event_stream_ordering BIGINT PRIMARY KEY NOT NULL, - event_id TEXT NOT NULL, - state_group BIGINT NOT NULL -); -- cgit 1.4.1 From eaed4e6113f5ed40056fa02ae922cb273d02be6e Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 14 Sep 2022 16:33:54 +0200 Subject: Remove unused method in `synapse.api.auth.Auth`. (#13795) Clean-up from b19060a29b4f73897847db2aba5d03ec819086e0 (#13094) and 73af10f419346a5f2d70131ac1ed8e69942edca0 (#13093) which removed all callers. --- changelog.d/13795.misc | 1 + synapse/api/auth.py | 9 --------- 2 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 changelog.d/13795.misc diff --git a/changelog.d/13795.misc b/changelog.d/13795.misc new file mode 100644 index 0000000000..20d90cc130 --- /dev/null +++ b/changelog.d/13795.misc @@ -0,0 +1 @@ +Remove unused method in `synapse.api.auth.Auth`. 
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 4a75eb6b21..3d7f986ac7 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -459,15 +459,6 @@ class Auth: ) raise InvalidClientTokenError("Invalid access token passed.") - def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService: - token = self.get_access_token_from_request(request) - service = self.store.get_app_service_by_token(token) - if not service: - logger.warning("Unrecognised appservice access token.") - raise InvalidClientTokenError() - request.requester = create_requester(service.sender, app_service=service) - return service - async def is_server_admin(self, requester: Requester) -> bool: """Check if the given user is a local server admin. -- cgit 1.4.1 From cf65433de26ecce551c64e56d9ee8435c99defab Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 14 Sep 2022 15:29:05 +0000 Subject: Fix a memory leak when running the unit tests. (#13798) --- changelog.d/13798.misc | 1 + synapse/util/caches/__init__.py | 3 ++- synapse/util/metrics.py | 10 +++++----- 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13798.misc diff --git a/changelog.d/13798.misc b/changelog.d/13798.misc new file mode 100644 index 0000000000..e4ec2d77d6 --- /dev/null +++ b/changelog.d/13798.misc @@ -0,0 +1 @@ +Fix a memory leak when running the unit tests. \ No newline at end of file diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 35c0be08b0..f7c3a6794e 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -205,8 +205,9 @@ def register_cache( add_resizable_cache(cache_name, resize_callback) metric = CacheMetric(cache, cache_type, cache_name, collect_callback) + metric_name = "cache_%s_%s" % (cache_type, cache_name) caches_by_name[cache_name] = cache - CACHE_METRIC_REGISTRY.register_hook(metric.collect) + CACHE_METRIC_REGISTRY.register_hook(metric_name, metric.collect) return metric diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 9687120ebf..165480bdbe 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -15,7 +15,7 @@ import logging from functools import wraps from types import TracebackType -from typing import Awaitable, Callable, Generator, List, Optional, Type, TypeVar +from typing import Awaitable, Callable, Dict, Generator, Optional, Type, TypeVar from prometheus_client import CollectorRegistry, Counter, Metric from typing_extensions import Concatenate, ParamSpec, Protocol @@ -220,21 +220,21 @@ class DynamicCollectorRegistry(CollectorRegistry): def __init__(self) -> None: super().__init__() - self._pre_update_hooks: List[Callable[[], None]] = [] + self._pre_update_hooks: Dict[str, Callable[[], None]] = {} def collect(self) -> Generator[Metric, None, None]: """ Collects metrics, calling pre-update hooks first. """ - for pre_update_hook in self._pre_update_hooks: + for pre_update_hook in self._pre_update_hooks.values(): pre_update_hook() yield from super().collect() - def register_hook(self, hook: Callable[[], None]) -> None: + def register_hook(self, metric_name: str, hook: Callable[[], None]) -> None: """ Registers a hook that is called before metric collection. """ - self._pre_update_hooks.append(hook) + self._pre_update_hooks[metric_name] = hook -- cgit 1.4.1 From 6302753012927b63feddc71dd287e2d3554707d4 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 14 Sep 2022 15:53:18 +0000 Subject: Deduplicate `is_server_notices_room`. 
(#13780) --- changelog.d/13780.misc | 1 + synapse/handlers/message.py | 10 +--------- synapse/handlers/room_member.py | 10 +--------- synapse/storage/databases/main/roommember.py | 17 +++++++++++++++++ 4 files changed, 20 insertions(+), 18 deletions(-) create mode 100644 changelog.d/13780.misc diff --git a/changelog.d/13780.misc b/changelog.d/13780.misc new file mode 100644 index 0000000000..1bcac51cad --- /dev/null +++ b/changelog.d/13780.misc @@ -0,0 +1 @@ +Deduplicate `is_server_notices_room`. \ No newline at end of file diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 72157d5a36..e07cda133a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -752,20 +752,12 @@ class EventCreationHandler: if builder.type == EventTypes.Member: membership = builder.content.get("membership", None) if membership == Membership.JOIN: - return await self._is_server_notices_room(builder.room_id) + return await self.store.is_server_notice_room(builder.room_id) elif membership == Membership.LEAVE: # the user is always allowed to leave (but not kick people) return builder.state_key == requester.user.to_string() return False - async def _is_server_notices_room(self, room_id: str) -> bool: - if self.config.servernotices.server_notices_mxid is None: - return False - is_server_notices_room = await self.store.check_local_user_in_room( - user_id=self.config.servernotices.server_notices_mxid, room_id=room_id - ) - return is_server_notices_room - async def assert_accepted_privacy_policy(self, requester: Requester) -> None: """Check if a user has accepted the privacy policy diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 5d4adf5bfd..8d01f4bf2b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -837,7 +837,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): old_membership == Membership.INVITE and effective_membership_state == Membership.LEAVE ): - is_blocked = await self._is_server_notice_room(room_id) + is_blocked = await self.store.is_server_notice_room(room_id) if is_blocked: raise SynapseError( HTTPStatus.FORBIDDEN, @@ -1617,14 +1617,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): return False - async def _is_server_notice_room(self, room_id: str) -> bool: - if self._server_notices_mxid is None: - return False - is_server_notices_room = await self.store.check_local_user_in_room( - user_id=self._server_notices_mxid, room_id=room_id - ) - return is_server_notices_room - class RoomMemberMasterHandler(RoomMemberHandler): def __init__(self, hs: "HomeServer"): diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index fdb4684e12..a8d224602a 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -88,6 +88,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): # at a time. Keyed by room_id. self._joined_host_linearizer = Linearizer("_JoinedHostsCache") + self._server_notices_mxid = hs.config.servernotices.server_notices_mxid + if ( self.hs.config.worker.run_background_tasks and self.hs.config.metrics.metrics_flags.known_servers @@ -504,6 +506,21 @@ class RoomMemberWorkerStore(EventsWorkerStore): return membership == Membership.JOIN + async def is_server_notice_room(self, room_id: str) -> bool: + """ + Determines whether the given room is a 'Server Notices' room, used for + sending server notices to a user. 
+ + This is determined by seeing whether the server notices user is present + in the room. + """ + if self._server_notices_mxid is None: + return False + is_server_notices_room = await self.check_local_user_in_room( + user_id=self._server_notices_mxid, room_id=room_id + ) + return is_server_notices_room + async def get_local_current_membership_for_user_in_room( self, user_id: str, room_id: str ) -> Tuple[Optional[str], Optional[str]]: -- cgit 1.4.1 From f2d12ccabef17faa0bf6b34fbb6d944849afc4d4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 14 Sep 2022 12:01:42 -0400 Subject: Use partial indices on SQLIte. (#13802) Partial indices have been supported since SQLite 3.8, but Synapse now requires >= 3.27, so we can enable support for them. This requires rebuilding previous indices which were partial on PostgreSQL, but not on SQLite. --- changelog.d/13802.misc | 1 + synapse/storage/background_updates.py | 6 +-- .../storage/databases/main/event_push_actions.py | 1 - .../main/delta/72/09partial_indices.sql.sqlite | 56 ++++++++++++++++++++++ 4 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 changelog.d/13802.misc create mode 100644 synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite diff --git a/changelog.d/13802.misc b/changelog.d/13802.misc new file mode 100644 index 0000000000..0d55071326 --- /dev/null +++ b/changelog.d/13802.misc @@ -0,0 +1 @@ +Use partial indices on SQLite. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 555b4e77d2..cf1eabc437 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -581,9 +581,6 @@ class BackgroundUpdater: def create_index_sqlite(conn: Connection) -> None: # Sqlite doesn't support concurrent creation of indexes. # - # We don't use partial indices on SQLite as it wasn't introduced - # until 3.8, and wheezy and CentOS 7 have 3.7 - # # We assume that sqlite doesn't give us invalid indices; however # we may still end up with the index existing but the # background_updates not having been recorded if synapse got shut @@ -591,12 +588,13 @@ class BackgroundUpdater: # has supported CREATE TABLE|INDEX IF NOT EXISTS since 3.3.0.) sql = ( "CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s" - " (%(columns)s)" + " (%(columns)s) %(where_clause)s" ) % { "unique": "UNIQUE" if unique else "", "name": index_name, "table": table, "columns": ", ".join(columns), + "where_clause": "WHERE " + where_clause if where_clause else "", } c = conn.cursor() diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index f4a07de2a3..3a3fb8c507 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1255,7 +1255,6 @@ class EventPushActionsStore(EventPushActionsWorkerStore): table="event_push_actions", columns=["highlight", "stream_ordering"], where_clause="highlight=0", - psql_only=True, ) async def get_push_actions_for_user( diff --git a/synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite b/synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite new file mode 100644 index 0000000000..c8dfdf0218 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite @@ -0,0 +1,56 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- SQLite needs to rebuild indices which use partial indices on Postgres, but +-- previously did not use them on SQLite. + +-- Drop each index that was added with register_background_index_update AND specified +-- a where_clause (that existed before this delta). + +-- From events_bg_updates.py +DROP INDEX IF EXISTS event_contains_url_index; +-- There is also a redactions_censored_redacts index, but that gets dropped. +DROP INDEX IF EXISTS redactions_have_censored_ts; +-- There is also a PostgreSQL only index (event_contains_url_index2) +-- which gets renamed to event_contains_url_index. + +-- From roommember.py +DROP INDEX IF EXISTS room_memberships_user_room_forgotten; + +-- From presence.py +DROP INDEX IF EXISTS presence_stream_state_not_offline_idx; + +-- From media_repository.py +DROP INDEX IF EXISTS local_media_repository_url_idx; + +-- From event_push_actions.py +DROP INDEX IF EXISTS event_push_actions_highlights_index; +-- There's also an event_push_actions_stream_highlight_index which was previously +-- PostgreSQL-only. + +-- From state.py +DROP INDEX IF EXISTS current_state_events_member_index; + +-- Re-insert the background jobs to re-create the indices. +INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7209, 'event_contains_url_index', '{}', NULL), + (7209, 'redactions_have_censored_ts_idx', '{}', NULL), + (7209, 'room_membership_forgotten_idx', '{}', NULL), + (7209, 'presence_stream_not_offline_index', '{}', NULL), + (7209, 'local_media_repository_url_idx', '{}', NULL), + (7209, 'event_push_actions_highlights_index', '{}', NULL), + (7209, 'event_push_actions_stream_highlight_index', '{}', NULL), + (7209, 'current_state_members_idx', '{}', NULL) +ON CONFLICT (update_name) DO NOTHING; -- cgit 1.4.1 From 666ae877292d4747b9441105e3df8558f7a335c0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 14 Sep 2022 13:11:16 -0400 Subject: Update event push action and receipt tables to support threads. (#13753) Adds a `thread_id` column to the `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` tables. This will allow notifications to be segmented by thread in a future pull request. The `thread_id` column stores the root event ID or the special value `"main"`. The `thread_id` column for `event_push_actions` and `event_push_summary` is backfilled with `"main"` for all existing rows. New entries into `event_push_actions` and `event_push_actions_staging` will get the proper thread ID. `receipts_linearized` and `receipts_graph` also gain a `thread_id` column, which is similar, except `NULL` is a special value meaning the receipt is "unthreaded". See MSC3771 and MSC3773 for where this data will be useful.
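As a rough, self-contained restatement of the bucketing rule this commit implements (a hypothetical helper; the real code in `bulk_push_rule_evaluator.py` below uses Synapse's `relation_from_event` helper and `RelationTypes.THREAD`):

from typing import Optional

THREAD_REL_TYPE = "m.thread"  # the rel_type that RelationTypes.THREAD denotes

def thread_id_for_push_actions(rel_type: Optional[str], parent_id: Optional[str]) -> str:
    # A threaded event is bucketed under its thread root's event ID...
    if rel_type == THREAD_REL_TYPE and parent_id is not None:
        return parent_id
    # ...and every other event lands on the special "main" timeline.
    return "main"

For example, an unthreaded message yields `"main"`, while a reply inside a thread yields the thread root's event ID, matching the values the backfill update below writes.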
--- changelog.d/13753.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 29 ++--- .../storage/databases/main/event_push_actions.py | 121 ++++++++++++++++++++- synapse/storage/databases/main/events.py | 4 +- synapse/storage/databases/main/receipts.py | 20 ++++ synapse/storage/schema/__init__.py | 6 +- .../main/delta/72/06thread_notifications.sql | 30 +++++ .../main/delta/72/07thread_receipts.sql.postgres | 30 +++++ .../main/delta/72/07thread_receipts.sql.sqlite | 70 ++++++++++++ .../schema/main/delta/72/08thread_receipts.sql | 20 ++++ tests/replication/slave/storage/test_events.py | 1 + 11 files changed, 312 insertions(+), 20 deletions(-) create mode 100644 changelog.d/13753.misc create mode 100644 synapse/storage/schema/main/delta/72/06thread_notifications.sql create mode 100644 synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres create mode 100644 synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite create mode 100644 synapse/storage/schema/main/delta/72/08thread_receipts.sql diff --git a/changelog.d/13753.misc b/changelog.d/13753.misc new file mode 100644 index 0000000000..63de2eb9f9 --- /dev/null +++ b/changelog.d/13753.misc @@ -0,0 +1 @@ +Prepatory work for storing thread IDs for notifications and receipts. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index d1caf8a0f7..3846fbc5f0 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -198,7 +198,7 @@ class BulkPushRuleEvaluator: return pl_event.content if pl_event else {}, sender_level async def _get_mutual_relations( - self, event: EventBase, rules: Iterable[Tuple[PushRule, bool]] + self, parent_id: str, rules: Iterable[Tuple[PushRule, bool]] ) -> Dict[str, Set[Tuple[str, str]]]: """ Fetch event metadata for events which related to the same event as the given event. @@ -206,7 +206,7 @@ class BulkPushRuleEvaluator: If the given event has no relation information, returns an empty dictionary. Args: - event_id: The event ID which is targeted by relations. + parent_id: The event ID which is targeted by relations. rules: The push rules which will be processed for this event. Returns: @@ -220,12 +220,6 @@ class BulkPushRuleEvaluator: if not self._relations_match_enabled: return {} - # If the event does not have a relation, then cannot have any mutual - # relations. - relation = relation_from_event(event) - if not relation: - return {} - # Pre-filter to figure out which relation types are interesting. rel_types = set() for rule, enabled in rules: @@ -246,9 +240,7 @@ class BulkPushRuleEvaluator: return {} # If any valid rules were found, fetch the mutual relations. - return await self.store.get_mutual_event_relations( - relation.parent_id, rel_types - ) + return await self.store.get_mutual_event_relations(parent_id, rel_types) @measure_func("action_for_event_by_user") async def action_for_event_by_user( @@ -281,9 +273,17 @@ class BulkPushRuleEvaluator: sender_power_level, ) = await self._get_power_levels_and_sender_level(event, context) - relations = await self._get_mutual_relations( - event, itertools.chain(*rules_by_user.values()) - ) + relation = relation_from_event(event) + # If the event does not have a relation, then cannot have any mutual + # relations or thread ID. 
+ relations = {} + thread_id = "main" + if relation: + relations = await self._get_mutual_relations( + relation.parent_id, itertools.chain(*rules_by_user.values()) + ) + if relation.rel_type == RelationTypes.THREAD: + thread_id = relation.parent_id evaluator = PushRuleEvaluatorForEvent( event, @@ -352,6 +352,7 @@ class BulkPushRuleEvaluator: event.event_id, actions_by_user, count_as_unread, + thread_id, ) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 3a3fb8c507..6b8668d2dc 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -98,6 +98,7 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.stream import StreamWorkerStore +from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -232,6 +233,104 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas replaces_index="event_push_summary_user_rm", ) + self.db_pool.updates.register_background_index_update( + "event_push_summary_unique_index2", + index_name="event_push_summary_unique_index2", + table="event_push_summary", + columns=["user_id", "room_id", "thread_id"], + unique=True, + ) + + self.db_pool.updates.register_background_update_handler( + "event_push_backfill_thread_id", + self._background_backfill_thread_id, + ) + + async def _background_backfill_thread_id( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Fill in the thread_id field for event_push_actions and event_push_summary. + + This is preparatory so that it can be made non-nullable in the future. + + Because all current (null) data is done in an unthreaded manner this + simply assumes it is on the "main" timeline. Since event_push_actions + are periodically cleared it is not possible to correctly re-calculate + the thread_id. + """ + event_push_actions_done = progress.get("event_push_actions_done", False) + + def add_thread_id_txn( + txn: LoggingTransaction, table_name: str, start_stream_ordering: int + ) -> int: + sql = f""" + SELECT stream_ordering + FROM {table_name} + WHERE + thread_id IS NULL + AND stream_ordering > ? + ORDER BY stream_ordering + LIMIT ? + """ + txn.execute(sql, (start_stream_ordering, batch_size)) + + # No more rows to process. + rows = txn.fetchall() + if not rows: + progress[f"{table_name}_done"] = True + self.db_pool.updates._background_update_progress_txn( + txn, "event_push_backfill_thread_id", progress + ) + return 0 + + # Update the thread ID for any of those rows. + max_stream_ordering = rows[-1][0] + + sql = f""" + UPDATE {table_name} + SET thread_id = 'main' + WHERE stream_ordering <= ? AND thread_id IS NULL + """ + txn.execute(sql, (max_stream_ordering,)) + + # Update progress. + processed_rows = txn.rowcount + progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering + self.db_pool.updates._background_update_progress_txn( + txn, "event_push_backfill_thread_id", progress + ) + + return processed_rows + + # First update the event_push_actions table, then the event_push_summary table. + # + # Note that the event_push_actions_staging table is ignored since it is + # assumed that items in that table will only exist for a short period of + # time. 
+ if not event_push_actions_done: + result = await self.db_pool.runInteraction( + "event_push_backfill_thread_id", + add_thread_id_txn, + "event_push_actions", + progress.get("max_event_push_actions_stream_ordering", 0), + ) + else: + result = await self.db_pool.runInteraction( + "event_push_backfill_thread_id", + add_thread_id_txn, + "event_push_summary", + progress.get("max_event_push_summary_stream_ordering", 0), + ) + + # Only done after the event_push_summary table is done. + if not result: + await self.db_pool.updates._end_background_update( + "event_push_backfill_thread_id" + ) + + return result + @cached(tree=True, max_entries=5000) async def get_unread_event_push_actions_by_room_for_user( self, @@ -670,6 +769,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas event_id: str, user_id_actions: Dict[str, Collection[Union[Mapping, str]]], count_as_unread: bool, + thread_id: str, ) -> None: """Add the push actions for the event to the push action staging area. @@ -678,6 +778,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas user_id_actions: A mapping of user_id to list of push actions, where an action can either be a string or dict. count_as_unread: Whether this event should increment unread counts. + thread_id: The thread this event is parent of, if applicable. """ if not user_id_actions: return @@ -686,7 +787,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # can be used to insert into the `event_push_actions_staging` table. def _gen_entry( user_id: str, actions: Collection[Union[Mapping, str]] - ) -> Tuple[str, str, str, int, int, int]: + ) -> Tuple[str, str, str, int, int, int, str]: is_highlight = 1 if _action_has_highlight(actions) else 0 notif = 1 if "notify" in actions else 0 return ( @@ -696,11 +797,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas notif, # notif column is_highlight, # highlight column int(count_as_unread), # unread column + thread_id, # thread_id column ) await self.db_pool.simple_insert_many( "event_push_actions_staging", - keys=("event_id", "user_id", "actions", "notif", "highlight", "unread"), + keys=( + "event_id", + "user_id", + "actions", + "notif", + "highlight", + "unread", + "thread_id", + ), values=[ _gen_entry(user_id, actions) for user_id, actions in user_id_actions.items() @@ -981,6 +1091,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas ) # Replace the previous summary with the new counts. + # + # TODO(threads): Upsert per-thread instead of setting them all to main. self.db_pool.simple_upsert_txn( txn, table="event_push_summary", @@ -990,6 +1102,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas "unread_count": unread_count, "stream_ordering": old_rotate_stream_ordering, "last_receipt_stream_ordering": stream_ordering, + "thread_id": "main", }, ) @@ -1138,17 +1251,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas logger.info("Rotating notifications, handling %d rows", len(summaries)) + # TODO(threads): Update on a per-thread basis. 
self.db_pool.simple_upsert_many_txn( txn, table="event_push_summary", key_names=("user_id", "room_id"), key_values=[(user_id, room_id) for user_id, room_id in summaries], - value_names=("notif_count", "unread_count", "stream_ordering"), + value_names=("notif_count", "unread_count", "stream_ordering", "thread_id"), value_values=[ ( summary.notif_count, summary.unread_count, summary.stream_ordering, + "main", ) for summary in summaries.values() ], diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index a4010ee28d..c0b4080e4b 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2192,9 +2192,9 @@ class PersistEventsStore: sql = """ INSERT INTO event_push_actions ( room_id, event_id, user_id, actions, stream_ordering, - topological_ordering, notif, highlight, unread + topological_ordering, notif, highlight, unread, thread_id ) - SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread + SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread, thread_id FROM event_push_actions_staging WHERE event_id = ? """ diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 719a12b0ae..ddb8e80b69 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -113,6 +113,24 @@ class ReceiptsWorkerStore(SQLBaseStore): prefilled_cache=receipts_stream_prefill, ) + self.db_pool.updates.register_background_index_update( + "receipts_linearized_unique_index", + index_name="receipts_linearized_unique_index", + table="receipts_linearized", + columns=["room_id", "receipt_type", "user_id"], + where_clause="thread_id IS NULL", + unique=True, + ) + + self.db_pool.updates.register_background_index_update( + "receipts_graph_unique_index", + index_name="receipts_graph_unique_index", + table="receipts_graph", + columns=["room_id", "receipt_type", "user_id"], + where_clause="thread_id IS NULL", + unique=True, + ) + def get_max_receipt_stream_id(self) -> int: """Get the current max stream ID for receipts stream""" return self._receipts_id_gen.get_current_token() @@ -677,6 +695,7 @@ class ReceiptsWorkerStore(SQLBaseStore): "event_id": event_id, "event_stream_ordering": stream_ordering, "data": json_encoder.encode(data), + "thread_id": None, }, # receipts_linearized has a unique constraint on # (user_id, room_id, receipt_type), so no need to lock @@ -824,6 +843,7 @@ class ReceiptsWorkerStore(SQLBaseStore): values={ "event_ids": json_encoder.encode(event_ids), "data": json_encoder.encode(data), + "thread_id": None, }, # receipts_graph has a unique constraint on # (user_id, room_id, receipt_type), so no need to lock diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 32cda5e3ba..38c9532bfd 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 72 # remember to update the list below when updating +SCHEMA_VERSION = 73 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -77,6 +77,10 @@ Changes in SCHEMA_VERSION = 72: - Tables related to groups are dropped. 
- Unused column application_services_state.last_txn is dropped - Cache invalidation stream id sequence now begins at 2 to match code expectation. + +Changes in SCHEMA_VERSION = 73; + - thread_id column is added to event_push_actions, event_push_actions_staging + event_push_summary, receipts_linearized, and receipts_graph. """ diff --git a/synapse/storage/schema/main/delta/72/06thread_notifications.sql b/synapse/storage/schema/main/delta/72/06thread_notifications.sql new file mode 100644 index 0000000000..2f4f5dac7a --- /dev/null +++ b/synapse/storage/schema/main/delta/72/06thread_notifications.sql @@ -0,0 +1,30 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a nullable column for thread ID to the event push actions tables; this +-- will be filled in with a default value for any previously existing rows. +-- +-- After migration this can be made non-nullable. + +ALTER TABLE event_push_actions_staging ADD COLUMN thread_id TEXT; +ALTER TABLE event_push_actions ADD COLUMN thread_id TEXT; +ALTER TABLE event_push_summary ADD COLUMN thread_id TEXT; + +-- Update the unique index for `event_push_summary`. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7006, 'event_push_summary_unique_index2', '{}'); + +INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7006, 'event_push_backfill_thread_id', '{}', 'event_push_summary_unique_index2'); diff --git a/synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres new file mode 100644 index 0000000000..55fff9e278 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres @@ -0,0 +1,30 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a nullable column for thread ID to the receipts table; this allows a +-- receipt per user, per room, as well as an unthreaded receipt (corresponding +-- to a null thread ID). + +ALTER TABLE receipts_linearized ADD COLUMN thread_id TEXT; +ALTER TABLE receipts_graph ADD COLUMN thread_id TEXT; + +-- Rebuild the unique constraint with the thread_id. 
+ALTER TABLE receipts_linearized + ADD CONSTRAINT receipts_linearized_uniqueness_thread + UNIQUE (room_id, receipt_type, user_id, thread_id); + +ALTER TABLE receipts_graph + ADD CONSTRAINT receipts_graph_uniqueness_thread + UNIQUE (room_id, receipt_type, user_id, thread_id); diff --git a/synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite new file mode 100644 index 0000000000..232f67deb4 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite @@ -0,0 +1,70 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Allow multiple receipts per user per room via a nullable thread_id column. +-- +-- SQLite doesn't support modifying constraints to an existing table, so it must +-- be recreated. + +-- Create the new tables. +CREATE TABLE receipts_linearized_new ( + stream_id BIGINT NOT NULL, + room_id TEXT NOT NULL, + receipt_type TEXT NOT NULL, + user_id TEXT NOT NULL, + event_id TEXT NOT NULL, + thread_id TEXT, + event_stream_ordering BIGINT, + data TEXT NOT NULL, + CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id), + CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id) +); + +CREATE TABLE receipts_graph_new ( + room_id TEXT NOT NULL, + receipt_type TEXT NOT NULL, + user_id TEXT NOT NULL, + event_ids TEXT NOT NULL, + thread_id TEXT, + data TEXT NOT NULL, + CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id), + CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id) +); + +-- Drop the old indexes. +DROP INDEX IF EXISTS receipts_linearized_id; +DROP INDEX IF EXISTS receipts_linearized_room_stream; +DROP INDEX IF EXISTS receipts_linearized_user; + +-- Copy the data. +INSERT INTO receipts_linearized_new (stream_id, room_id, receipt_type, user_id, event_id, event_stream_ordering, data) + SELECT stream_id, room_id, receipt_type, user_id, event_id, event_stream_ordering, data + FROM receipts_linearized; +INSERT INTO receipts_graph_new (room_id, receipt_type, user_id, event_ids, data) + SELECT room_id, receipt_type, user_id, event_ids, data + FROM receipts_graph; + +-- Drop the old tables. +DROP TABLE receipts_linearized; +DROP TABLE receipts_graph; + +-- Rename the tables. +ALTER TABLE receipts_linearized_new RENAME TO receipts_linearized; +ALTER TABLE receipts_graph_new RENAME TO receipts_graph; + +-- Create the indices. 
+CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id ); +CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id ); +CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id ); diff --git a/synapse/storage/schema/main/delta/72/08thread_receipts.sql b/synapse/storage/schema/main/delta/72/08thread_receipts.sql new file mode 100644 index 0000000000..e35b021f31 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/08thread_receipts.sql @@ -0,0 +1,20 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7007, 'receipts_linearized_unique_index', '{}'); + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7007, 'receipts_graph_unique_index', '{}'); diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 531a0db2d0..49a21e2e85 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -404,6 +404,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): event.event_id, {user_id: actions for user_id, actions in push_actions}, False, + "main", ) ) return event, context -- cgit 1.4.1 From 957e3d74fc70f92bb9ed3c709f87752bf77a8c90 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 14 Sep 2022 13:57:50 -0500 Subject: Keep track when we try and fail to process a pulled event (#13589) We can follow-up this PR with: 1. Only try to backfill from an event if we haven't tried recently -> https://github.com/matrix-org/synapse/issues/13622 1. When we decide to backfill that event again, process it in the background so it doesn't block and make `/messages` slow when we know it will probably fail again -> https://github.com/matrix-org/synapse/issues/13623 1. 
Generally track failures everywhere we try and fail to pull an event over federation -> https://github.com/matrix-org/synapse/issues/13700 Fix https://github.com/matrix-org/synapse/issues/13621 Part of https://github.com/matrix-org/synapse/issues/13356 Mentioned in [internal doc](https://docs.google.com/document/d/1lvUoVfYUiy6UaHB6Rb4HicjaJAU40-APue9Q4vzuW3c/edit#bookmark=id.qv7cj51sv9i5) --- changelog.d/13589.feature | 1 + synapse/handlers/federation_event.py | 7 + synapse/storage/databases/main/event_federation.py | 45 +++++ synapse/storage/databases/main/events.py | 32 ++- synapse/storage/schema/__init__.py | 2 + .../main/delta/73/01event_failed_pull_attempts.sql | 29 +++ tests/handlers/test_federation_event.py | 222 +++++++++++++++++++++ 7 files changed, 329 insertions(+), 9 deletions(-) create mode 100644 changelog.d/13589.feature create mode 100644 synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql diff --git a/changelog.d/13589.feature b/changelog.d/13589.feature new file mode 100644 index 0000000000..78fa1ddb52 --- /dev/null +++ b/changelog.d/13589.feature @@ -0,0 +1 @@ +Keep track when we attempt to backfill an event but fail so we can intelligently back-off in the future. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index ace7adcffb..9e065e1116 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -862,6 +862,9 @@ class FederationEventHandler: self._sanity_check_event(event) except SynapseError as err: logger.warning("Event %s failed sanity check: %s", event_id, err) + await self._store.record_event_failed_pull_attempt( + event.room_id, event_id, str(err) + ) return try: @@ -897,6 +900,10 @@ class FederationEventHandler: backfilled=backfilled, ) except FederationError as e: + await self._store.record_event_failed_pull_attempt( + event.room_id, event_id, str(e) + ) + if e.code == 403: logger.warning("Pulled event %s failed history check.", event_id) else: diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index ca47a22bf1..ef477978ed 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1294,6 +1294,51 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas return event_id_results + @trace + async def record_event_failed_pull_attempt( + self, room_id: str, event_id: str, cause: str + ) -> None: + """ + Record when we fail to pull an event over federation. + + This information allows us to be more intelligent when we decide to + retry (we don't need to fail over and over) and we can process that + event in the background so we don't block on it each time. + + Args: + room_id: The room where the event failed to pull from + event_id: The event that failed to be fetched or processed + cause: The error message or reason that we failed to pull the event + """ + await self.db_pool.runInteraction( + "record_event_failed_pull_attempt", + self._record_event_failed_pull_attempt_upsert_txn, + room_id, + event_id, + cause, + db_autocommit=True, # Safe as it's a single upsert + ) + + def _record_event_failed_pull_attempt_upsert_txn( + self, + txn: LoggingTransaction, + room_id: str, + event_id: str, + cause: str, + ) -> None: + sql = """ + INSERT INTO event_failed_pull_attempts ( + room_id, event_id, num_attempts, last_attempt_ts, last_cause + ) + VALUES (?, ?, ?, ?, ?) 
+ ON CONFLICT (room_id, event_id) DO UPDATE SET + num_attempts=event_failed_pull_attempts.num_attempts + 1, + last_attempt_ts=EXCLUDED.last_attempt_ts, + last_cause=EXCLUDED.last_cause; + """ + + txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause)) + async def get_missing_events( self, room_id: str, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index c0b4080e4b..1b54a2eb57 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2435,17 +2435,31 @@ class PersistEventsStore: "DELETE FROM event_backward_extremities" " WHERE event_id = ? AND room_id = ?" ) + backward_extremity_tuples_to_remove = [ + (ev.event_id, ev.room_id) + for ev in events + if not ev.internal_metadata.is_outlier() + # If we encountered an event with no prev_events, then we might + # as well remove it now because it won't ever have anything else + # to backfill from. + or len(ev.prev_event_ids()) == 0 + ] txn.execute_batch( query, - [ - (ev.event_id, ev.room_id) - for ev in events - if not ev.internal_metadata.is_outlier() - # If we encountered an event with no prev_events, then we might - # as well remove it now because it won't ever have anything else - # to backfill from. - or len(ev.prev_event_ids()) == 0 - ], + backward_extremity_tuples_to_remove, + ) + + # Clear out the failed backfill attempts after we successfully pulled + # the event. Since we no longer need these events as backward + # extremities, it also means that they won't be backfilled from again so + # we no longer need to store the backfill attempts around it. + query = """ + DELETE FROM event_failed_pull_attempts + WHERE event_id = ? and room_id = ? + """ + txn.execute_batch( + query, + backward_extremity_tuples_to_remove, ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 38c9532bfd..68e055c664 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -81,6 +81,8 @@ Changes in SCHEMA_VERSION = 72: Changes in SCHEMA_VERSION = 73; - thread_id column is added to event_push_actions, event_push_actions_staging event_push_summary, receipts_linearized, and receipts_graph. + - Add table `event_failed_pull_attempts` to keep track when we fail to pull + events over federation. """ diff --git a/synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql b/synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql new file mode 100644 index 0000000000..d397ee1082 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql @@ -0,0 +1,29 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- Add a table that keeps track of when we failed to pull an event over +-- federation (via /backfill, `/event`, `/get_missing_events`, etc). 
This allows +-- us to be more intelligent when we decide to retry (we don't need to fail over +-- and over) and we can process that event in the background so we don't block +-- on it each time. +CREATE TABLE IF NOT EXISTS event_failed_pull_attempts( + room_id TEXT NOT NULL REFERENCES rooms (room_id), + event_id TEXT NOT NULL, + num_attempts INT NOT NULL, + last_attempt_ts BIGINT NOT NULL, + last_cause TEXT NOT NULL, + PRIMARY KEY (room_id, event_id) +); diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 51c8dd6498..b5b89405a4 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -227,3 +227,225 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): if prev_exists_as_outlier: self.mock_federation_transport_client.get_event.assert_not_called() + + def test_process_pulled_event_records_failed_backfill_attempts( + self, + ) -> None: + """ + Test to make sure that failed backfill attempts for an event are + recorded in the `event_failed_pull_attempts` table. + + In this test, we pretend we are processing a "pulled" event via + backfill. The pulled event has a fake `prev_event` which our server has + obviously never seen before so it attempts to request the state at that + `prev_event`, which fails, as expected, because it's a fake event. Because + the server can't fetch the state at the missing `prev_event`, the + "pulled" event fails the history check and fails to process. + + We check that we correctly record the number of failed pull attempts + of the pulled event and, as a sanity check, that the "pulled" event isn't + persisted. + """ + OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" + main_store = self.hs.get_datastores().main + + # Create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(main_store.get_room_version(room_id)) + + # We expect an outbound request to /state_ids, so stub that out + self.mock_federation_transport_client.get_room_state_ids.return_value = make_awaitable( + { + # Mimic the other server not knowing about the state at all. + # We want to cause Synapse to throw an error (`Unable to get + # missing prev_event $fake_prev_event`) and fail to backfill + # the pulled event. + "pdu_ids": [], + "auth_chain_ids": [], + } + ) + # We also expect an outbound request to /state + self.mock_federation_transport_client.get_room_state.return_value = make_awaitable( + StateRequestResponse( + # Mimic the other server not knowing about the state at all. + # We want to cause Synapse to throw an error (`Unable to get + # missing prev_event $fake_prev_event`) and fail to backfill + # the pulled event.
+ auth_events=[], + state=[], + ) + ) + + pulled_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "test_regular_type", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [ + # The fake prev event will make the pulled event fail + # the history check (`Unable to get missing prev_event + # $fake_prev_event`) + "$fake_prev_event" + ], + "auth_events": [], + "origin_server_ts": 1, + "depth": 12, + "content": {"body": "pulled"}, + } + ), + room_version, + ) + + # The function under test: try to process the pulled event + with LoggingContext("test"): + self.get_success( + self.hs.get_federation_event_handler()._process_pulled_event( + self.OTHER_SERVER_NAME, pulled_event, backfilled=True + ) + ) + + # Make sure our failed pull attempt was recorded + backfill_num_attempts = self.get_success( + main_store.db_pool.simple_select_one_onecol( + table="event_failed_pull_attempts", + keyvalues={"event_id": pulled_event.event_id}, + retcol="num_attempts", + ) + ) + self.assertEqual(backfill_num_attempts, 1) + + # The function under test: try to process the pulled event again + with LoggingContext("test"): + self.get_success( + self.hs.get_federation_event_handler()._process_pulled_event( + self.OTHER_SERVER_NAME, pulled_event, backfilled=True + ) + ) + + # Make sure our second failed pull attempt was recorded (`num_attempts` was incremented) + backfill_num_attempts = self.get_success( + main_store.db_pool.simple_select_one_onecol( + table="event_failed_pull_attempts", + keyvalues={"event_id": pulled_event.event_id}, + retcol="num_attempts", + ) + ) + self.assertEqual(backfill_num_attempts, 2) + + # And as a sanity check, make sure the event was not persisted through all of this. + persisted = self.get_success( + main_store.get_event(pulled_event.event_id, allow_none=True) + ) + self.assertIsNone( + persisted, + "pulled event that fails the history check should not be persisted at all", + ) + + def test_process_pulled_event_clears_backfill_attempts_after_being_successfully_persisted( + self, + ) -> None: + """ + Test to make sure that failed pull attempts + (`event_failed_pull_attempts` table) for an event are cleared after the + event is successfully persisted. + + In this test, we pretend we are processing a "pulled" event via + backfill. The pulled event successfully processes and the backward + extremities are updated along with clearing out any failed pull attempts + for those old extremities. + + We check that we correctly clear the failed pull attempts of the + pulled event. 
+ """ + OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" + main_store = self.hs.get_datastores().main + + # Create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(main_store.get_room_version(room_id)) + + # allow the remote user to send state events + self.helper.send_state( + room_id, + "m.room.power_levels", + {"events_default": 0, "state_default": 0}, + tok=tok, + ) + + # add the remote user to the room + member_event = self.get_success( + event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join") + ) + + initial_state_map = self.get_success( + main_store.get_partial_current_state_ids(room_id) + ) + + auth_event_ids = [ + initial_state_map[("m.room.create", "")], + initial_state_map[("m.room.power_levels", "")], + member_event.event_id, + ] + + pulled_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "test_regular_type", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [member_event.event_id], + "auth_events": auth_event_ids, + "origin_server_ts": 1, + "depth": 12, + "content": {"body": "pulled"}, + } + ), + room_version, + ) + + # Fake the "pulled" event failing to backfill once so we can test + # if it's cleared out later on. + self.get_success( + main_store.record_event_failed_pull_attempt( + pulled_event.room_id, pulled_event.event_id, "fake cause" + ) + ) + # Make sure we have a failed pull attempt recorded for the pulled event + backfill_num_attempts = self.get_success( + main_store.db_pool.simple_select_one_onecol( + table="event_failed_pull_attempts", + keyvalues={"event_id": pulled_event.event_id}, + retcol="num_attempts", + ) + ) + self.assertEqual(backfill_num_attempts, 1) + + # The function under test: try to process the pulled event + with LoggingContext("test"): + self.get_success( + self.hs.get_federation_event_handler()._process_pulled_event( + self.OTHER_SERVER_NAME, pulled_event, backfilled=True + ) + ) + + # Make sure the failed pull attempts for the pulled event are cleared + backfill_num_attempts = self.get_success( + main_store.db_pool.simple_select_one_onecol( + table="event_failed_pull_attempts", + keyvalues={"event_id": pulled_event.event_id}, + retcol="num_attempts", + allow_none=True, + ) + ) + self.assertIsNone(backfill_num_attempts) + + # And as a sanity check, make sure the "pulled" event was persisted. + persisted = self.get_success( + main_store.get_event(pulled_event.event_id, allow_none=True) + ) + self.assertIsNotNone(persisted, "pulled event was not persisted at all") -- cgit 1.4.1