author | Erik Johnston <erik@matrix.org> | 2024-07-24 11:55:43 +0100
---|---|---
committer | Erik Johnston <erik@matrix.org> | 2024-07-24 11:55:43 +0100
commit | 41dbc94b4c8e361c1a8d12fceb7c364652851d1c (patch) |
tree | 60cb133b66509be1b34fd0b27ff725f43073d82a |
parent | Merge branch 'erikj/ss_faster_sort' into erikj/ss_hacks (diff) |
parent | Merge remote-tracking branch 'origin/develop' into erikj/ss_room_store (diff) |
download | synapse-41dbc94b4c8e361c1a8d12fceb7c364652851d1c.tar.xz |
Merge branch 'erikj/ss_room_store' into erikj/ss_hacks
43 files changed, 2042 insertions, 460 deletions
diff --git a/CHANGES.md b/CHANGES.md index 0a2b816ed1..f869674ace 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,61 @@ +# Synapse 1.112.0rc1 (2024-07-23) + +### Features + +- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416)) +- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418)) +- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419)) +- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429)) +- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432)) +- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433)) +- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454)) + +### Bugfixes + +- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) +- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) +- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) +- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) + +### Improved Documentation + +- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387)) +- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423)) +- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451)) + +### Internal Changes + +- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) +- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. 
([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) +- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) +- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) +- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) +- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460)) +- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461)) +- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468)) +- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469)) + + + +### Updates to locked dependencies + +* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441)) +* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464)) +* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444)) +* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440)) +* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445)) +* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465)) +* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466)) +* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456)) +* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467)) +* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448)) +* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443)) +* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446)) +* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442)) +* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427)) + # Synapse 1.111.0 (2024-07-16) No significant changes since 1.111.0rc2. diff --git a/changelog.d/17387.doc b/changelog.d/17387.doc deleted file mode 100644 index 82be10f135..0000000000 --- a/changelog.d/17387.doc +++ /dev/null @@ -1 +0,0 @@ -Update the readme image to have a white background, so that it is readable in dark mode. \ No newline at end of file diff --git a/changelog.d/17416.feature b/changelog.d/17416.feature deleted file mode 100644 index 1d119cf48f..0000000000 --- a/changelog.d/17416.feature +++ /dev/null @@ -1 +0,0 @@ -Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
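Several of the release-note entries above add extensions (to-device, E2EE) and room subscriptions to the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. As a rough illustration only — the endpoint is experimental and every field name below is taken from the MSC3575 proposal as it stood at the time, so treat the shape as an assumption rather than a stable API — a client request enabling the new extensions might look roughly like this:

```python
# Hypothetical MSC3575 sliding sync request body, written as a Python dict.
# Field names follow the MSC3575 proposal and Synapse's experimental
# implementation at the time of this commit; they may have changed since.
sliding_sync_request = {
    "lists": {
        "rooms": {
            "ranges": [[0, 10]],
            "required_state": [
                ["m.room.name", ""],
                ["m.room.avatar", ""],
                ["m.room.member", "$ME"],  # "$ME" is substituted with the requester's user ID
            ],
            "timeline_limit": 10,
        }
    },
    "room_subscriptions": {
        "!example:example.org": {          # hypothetical room ID
            "required_state": [["*", "*"]],
            "timeline_limit": 20,
        }
    },
    "extensions": {
        "to_device": {"enabled": True},    # to-device extension added in #17416
        "e2ee": {"enabled": True},         # E2EE extension added in #17454
    },
}
```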
diff --git a/changelog.d/17418.feature b/changelog.d/17418.feature deleted file mode 100644 index c5e56bc500..0000000000 --- a/changelog.d/17418.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17419.feature b/changelog.d/17419.feature deleted file mode 100644 index 186a27c470..0000000000 --- a/changelog.d/17419.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17423.doc b/changelog.d/17423.doc deleted file mode 100644 index 972bc659e4..0000000000 --- a/changelog.d/17423.doc +++ /dev/null @@ -1 +0,0 @@ -Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. diff --git a/changelog.d/17424.misc b/changelog.d/17424.misc deleted file mode 100644 index d4a81c137f..0000000000 --- a/changelog.d/17424.misc +++ /dev/null @@ -1 +0,0 @@ -Make sure we always use the right logic for enabling the media repo. diff --git a/changelog.d/17426.misc b/changelog.d/17426.misc deleted file mode 100644 index 886e5d4389..0000000000 --- a/changelog.d/17426.misc +++ /dev/null @@ -1 +0,0 @@ -Fix documentation on `RateLimiter#record_action`. \ No newline at end of file diff --git a/changelog.d/17429.feature b/changelog.d/17429.feature deleted file mode 100644 index 608b75d632..0000000000 --- a/changelog.d/17429.feature +++ /dev/null @@ -1 +0,0 @@ -Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17432.feature b/changelog.d/17432.feature deleted file mode 100644 index c86f04c118..0000000000 --- a/changelog.d/17432.feature +++ /dev/null @@ -1 +0,0 @@ -Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17433.feature b/changelog.d/17433.feature deleted file mode 100644 index ac9b5dee69..0000000000 --- a/changelog.d/17433.feature +++ /dev/null @@ -1 +0,0 @@ -Prepare for authenticated media freeze. \ No newline at end of file diff --git a/changelog.d/17434.bugfix b/changelog.d/17434.bugfix deleted file mode 100644 index c7cce52397..0000000000 --- a/changelog.d/17434.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. diff --git a/changelog.d/17435.bugfix b/changelog.d/17435.bugfix deleted file mode 100644 index 2d06a7c7fc..0000000000 --- a/changelog.d/17435.bugfix +++ /dev/null @@ -1 +0,0 @@ -Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). diff --git a/changelog.d/17438.bugfix b/changelog.d/17438.bugfix deleted file mode 100644 index cff6eecd48..0000000000 --- a/changelog.d/17438.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. 
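One of the bugfix entries above changes `heroes` to be ordered by `stream_ordering`, matching the room summary rules in the Matrix specification (the first few joined/invited members by stream ordering, excluding the syncing user). A minimal standalone sketch of that selection rule, not the actual Synapse code:

```python
from typing import List, Tuple

def pick_heroes(
    members: List[Tuple[str, int]], own_user_id: str, limit: int = 5
) -> List[str]:
    """Return up to `limit` hero user IDs, ordered by stream_ordering.

    `members` holds (user_id, stream_ordering) pairs for joined/invited users.
    The syncing user is excluded, per the room summary rules in the spec.
    """
    others = [(uid, ordering) for uid, ordering in members if uid != own_user_id]
    others.sort(key=lambda pair: pair[1])  # ascending stream_ordering
    return [uid for uid, _ in others[:limit]]

# Example: the two earliest members (other than @me) become the heroes.
print(pick_heroes(
    [("@me:example.org", 1), ("@bob:example.org", 3), ("@alice:example.org", 2)],
    own_user_id="@me:example.org",
))
# -> ['@alice:example.org', '@bob:example.org']
```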
diff --git a/changelog.d/17439.bugfix b/changelog.d/17439.bugfix deleted file mode 100644 index f36c3ec255..0000000000 --- a/changelog.d/17439.bugfix +++ /dev/null @@ -1 +0,0 @@ -Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. \ No newline at end of file diff --git a/changelog.d/17449.bugfix b/changelog.d/17449.bugfix deleted file mode 100644 index cd847a3d1c..0000000000 --- a/changelog.d/17449.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary call to resume producing in fake channel. \ No newline at end of file diff --git a/changelog.d/17451.doc b/changelog.d/17451.doc deleted file mode 100644 index 357ac2c906..0000000000 --- a/changelog.d/17451.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. diff --git a/changelog.d/17453.misc b/changelog.d/17453.misc deleted file mode 100644 index 2978a52477..0000000000 --- a/changelog.d/17453.misc +++ /dev/null @@ -1 +0,0 @@ -Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. diff --git a/changelog.d/17458.misc b/changelog.d/17458.misc deleted file mode 100644 index 09cce15d0d..0000000000 --- a/changelog.d/17458.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up generating sliding sync responses. diff --git a/changelog.d/17460.misc b/changelog.d/17460.misc deleted file mode 100644 index fd99da5a95..0000000000 --- a/changelog.d/17460.misc +++ /dev/null @@ -1 +0,0 @@ -Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. diff --git a/debian/changelog b/debian/changelog index 0470e25f2d..5209b9f5fd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium + + * New Synapse release 1.112.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 23 Jul 2024 08:58:55 -0600 + matrix-synapse-py3 (1.111.0) stable; urgency=medium * New Synapse release 1.111.0. diff --git a/debian/templates b/debian/templates index cab05715d0..7bfd3c2e9f 100644 --- a/debian/templates +++ b/debian/templates @@ -5,7 +5,7 @@ _Description: Name of the server: servers via federation. This is normally the public hostname of the server running synapse, but can be different if you set up delegation. Please refer to the delegation documentation in this case: - https://github.com/element-hq/synapse/blob/master/docs/delegate.md. + https://element-hq.github.io/synapse/latest/delegate.html. Template: matrix-synapse/report-stats Type: boolean diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index e8bc2df798..649f4f71c7 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -246,6 +246,7 @@ Example configuration: ```yaml presence: enabled: false + include_offline_users_on_sync: false ``` `enabled` can also be set to a special value of "untracked" which ignores updates @@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the *The "untracked" option was added in Synapse 1.96.0.* +When clients perform an initial or `full_state` sync, presence results for offline users are +not included by default. 
Setting `include_offline_users_on_sync` to `true` will always include +offline users in the results. Defaults to false. + --- ### `require_auth_for_profile_requests` diff --git a/poetry.lock b/poetry.lock index 2bfcb59cf2..945b91e022 100644 --- a/poetry.lock +++ b/poetry.lock @@ -542,120 +542,105 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "hiredis" -version = "2.3.2" +version = "3.0.0" description = "Python wrapper for hiredis" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:742093f33d374098aa21c1696ac6e4874b52658c870513a297a89265a4d08fe5"}, - {file = "hiredis-2.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:9e14fb70ca4f7efa924f508975199353bf653f452e4ef0a1e47549e208f943d7"}, - {file = "hiredis-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d7302b4b17fcc1cc727ce84ded7f6be4655701e8d58744f73b09cb9ed2b13df"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed63e8b75c193c5e5a8288d9d7b011da076cc314fafc3bfd59ec1d8a750d48c8"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b4edee59dc089bc3948f4f6fba309f51aa2ccce63902364900aa0a553a85e97"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6481c3b7673a86276220140456c2a6fbfe8d1fb5c613b4728293c8634134824"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684840b014ce83541a087fcf2d48227196576f56ae3e944d4dfe14c0a3e0ccb7"}, - {file = "hiredis-2.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c4c0bcf786f0eac9593367b6279e9b89534e008edbf116dcd0de956524702c8"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66ab949424ac6504d823cba45c4c4854af5c59306a1531edb43b4dd22e17c102"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:322c668ee1c12d6c5750a4b1057e6b4feee2a75b3d25d630922a463cfe5e7478"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bfa73e3f163c6e8b2ec26f22285d717a5f77ab2120c97a2605d8f48b26950dac"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7f39f28ffc65de577c3bc0c7615f149e35bc927802a0f56e612db9b530f316f9"}, - {file = "hiredis-2.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:55ce31bf4711da879b96d511208efb65a6165da4ba91cb3a96d86d5a8d9d23e6"}, - {file = "hiredis-2.3.2-cp310-cp310-win32.whl", hash = "sha256:3dd63d0bbbe75797b743f35d37a4cca7ca7ba35423a0de742ae2985752f20c6d"}, - {file = "hiredis-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ea002656a8d974daaf6089863ab0a306962c8b715db6b10879f98b781a2a5bf5"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:adfbf2e9c38b77d0db2fb32c3bdaea638fa76b4e75847283cd707521ad2475ef"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:80b02d27864ebaf9b153d4b99015342382eeaed651f5591ce6f07e840307c56d"}, - {file = "hiredis-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd40d2e2f82a483de0d0a6dfd8c3895a02e55e5c9949610ecbded18188fd0a56"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfa904045d7cebfb0f01dad51352551cce1d873d7c3f80c7ded7d42f8cac8f89"}, - {file = 
"hiredis-2.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28bd184b33e0dd6d65816c16521a4ba1ffbe9ff07d66873c42ea4049a62fed83"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f70481213373d44614148f0f2e38e7905be3f021902ae5167289413196de4ba4"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8797b528c1ff81eef06713623562b36db3dafa106b59f83a6468df788ff0d1"}, - {file = "hiredis-2.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02fc71c8333586871602db4774d3a3e403b4ccf6446dc4603ec12df563127cee"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0da56915bda1e0a49157191b54d3e27689b70960f0685fdd5c415dacdee2fbed"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e2674a5a3168349435b08fa0b82998ed2536eb9acccf7087efe26e4cd088a525"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:dc1c3fd49930494a67dcec37d0558d99d84eca8eb3f03b17198424538f2608d7"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:14c7b43205e515f538a9defb4e411e0f0576caaeeda76bb9993ed505486f7562"}, - {file = "hiredis-2.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bac7e02915b970c3723a7a7c5df4ba7a11a3426d2a3f181e041aa506a1ff028"}, - {file = "hiredis-2.3.2-cp311-cp311-win32.whl", hash = "sha256:63a090761ddc3c1f7db5e67aa4e247b4b3bb9890080bdcdadd1b5200b8b89ac4"}, - {file = "hiredis-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:70d226ab0306a5b8d408235cabe51d4bf3554c9e8a72d53ce0b3c5c84cf78881"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5c614552c6bd1d0d907f448f75550f6b24fb56cbfce80c094908b7990cad9702"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9c431431abf55b64347ddc8df68b3ef840269cb0aa5bc2d26ad9506eb4b1b866"}, - {file = "hiredis-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a45857e87e9d2b005e81ddac9d815a33efd26ec67032c366629f023fe64fb415"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138d141ec5a6ec800b6d01ddc3e5561ce1c940215e0eb9960876bfde7186aae"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:387f655444d912a963ab68abf64bf6e178a13c8e4aa945cb27388fd01a02e6f1"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4852f4bf88f0e2d9bdf91279892f5740ed22ae368335a37a52b92a5c88691140"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d711c107e83117129b7f8bd08e9820c43ceec6204fff072a001fd82f6d13db9f"}, - {file = "hiredis-2.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92830c16885f29163e1c2da1f3c1edb226df1210ec7e8711aaabba3dd0d5470a"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:16b01d9ceae265d4ab9547be0cd628ecaff14b3360357a9d30c029e5ae8b7e7f"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5986fb5f380169270a0293bebebd95466a1c85010b4f1afc2727e4d17c452512"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:49532d7939cc51f8e99efc326090c54acf5437ed88b9c904cc8015b3c4eda9c9"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = 
"sha256:8f34801b251ca43ad70691fb08b606a2e55f06b9c9fb1fc18fd9402b19d70f7b"}, - {file = "hiredis-2.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7298562a49d95570ab1c7fc4051e72824c6a80e907993a21a41ba204223e7334"}, - {file = "hiredis-2.3.2-cp312-cp312-win32.whl", hash = "sha256:e1d86b75de787481b04d112067a4033e1ecfda2a060e50318a74e4e1c9b2948c"}, - {file = "hiredis-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:6dbfe1887ffa5cf3030451a56a8f965a9da2fa82b7149357752b67a335a05fc6"}, - {file = "hiredis-2.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4fc242e9da4af48714199216eb535b61e8f8d66552c8819e33fc7806bd465a09"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81aa4e9a1fcf604c8c4b51aa5d258e195a6ba81efe1da82dea3204443eba01c"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419780f8583ddb544ffa86f9d44a7fcc183cd826101af4e5ffe535b6765f5f6b"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6871306d8b98a15e53a5f289ec1106a3a1d43e7ab6f4d785f95fcef9a7bd9504"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb0b35b63717ef1e41d62f4f8717166f7c6245064957907cfe177cc144357c"}, - {file = "hiredis-2.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c490191fa1218851f8a80c5a21a05a6f680ac5aebc2e688b71cbfe592f8fec6"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4baf4b579b108062e91bd2a991dc98b9dc3dc06e6288db2d98895eea8acbac22"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e627d8ef5e100556e09fb44c9571a432b10e11596d3c4043500080ca9944a91a"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:ba3dc0af0def8c21ce7d903c59ea1e8ec4cb073f25ece9edaec7f92a286cd219"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:56e9b7d6051688ca94e68c0c8a54a243f8db841911b683cedf89a29d4de91509"}, - {file = "hiredis-2.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:380e029bb4b1d34cf560fcc8950bf6b57c2ef0c9c8b7c7ac20b7c524a730fadd"}, - {file = "hiredis-2.3.2-cp37-cp37m-win32.whl", hash = "sha256:948d9f2ca7841794dd9b204644963a4bcd69ced4e959b0d4ecf1b8ce994a6daa"}, - {file = "hiredis-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cfa67afe2269b2d203cd1389c00c5bc35a287cd57860441fb0e53b371ea6a029"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bcbe47da0aebc00a7cfe3ebdcff0373b86ce2b1856251c003e3d69c9db44b5a7"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f2c9c0d910dd3f7df92f0638e7f65d8edd7f442203caf89c62fc79f11b0b73f8"}, - {file = "hiredis-2.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:01b6c24c0840ac7afafbc4db236fd55f56a9a0919a215c25a238f051781f4772"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f567489f422d40c21e53212a73bef4638d9f21043848150f8544ef1f3a6ad1"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28adecb308293e705e44087a1c2d557a816f032430d8a2a9bb7873902a1c6d48"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27e9619847e9dc70b14b1ad2d0fb4889e7ca18996585c3463cff6c951fd6b10b"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9a0026cfbf29f07649b0e34509091a2a6016ff8844b127de150efce1c3aff60b"}, - {file = "hiredis-2.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9de7586522e5da6bee83c9cf0dcccac0857a43249cb4d721a2e312d98a684d1"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e58494f282215fc461b06709e9a195a24c12ba09570f25bdf9efb036acc05101"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3a32b4b76d46f1eb42b24a918d51d8ca52411a381748196241d59a895f7c5c"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1979334ccab21a49c544cd1b8d784ffb2747f99a51cb0bd0976eebb517628382"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0c0773266e1c38a06e7593bd08870ac1503f5f0ce0f5c63f2b4134b090b5d6a4"}, - {file = "hiredis-2.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bd1cee053416183adcc8e6134704c46c60c3f66b8faaf9e65bf76191ca59a2f7"}, - {file = "hiredis-2.3.2-cp38-cp38-win32.whl", hash = "sha256:5341ce3d01ef3c7418a72e370bf028c7aeb16895e79e115fe4c954fff990489e"}, - {file = "hiredis-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8fc7197ff33047ce43a67851ccf190acb5b05c52fd4a001bb55766358f04da68"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:f47775e27388b58ce52f4f972f80e45b13c65113e9e6b6bf60148f893871dc9b"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9412a06b8a8e09abd6313d96864b6d7713c6003a365995a5c70cfb9209df1570"}, - {file = "hiredis-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3020b60e3fc96d08c2a9b011f1c2e2a6bdcc09cb55df93c509b88be5cb791df"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53d0f2c59bce399b8010a21bc779b4f8c32d0f582b2284ac8c98dc7578b27bc4"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57c0d0c7e308ed5280a4900d4468bbfec51f0e1b4cde1deae7d4e639bc6b7766"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d63318ca189fddc7e75f6a4af8eae9c0545863619fb38cfba5f43e81280b286"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e741ffe4e2db78a1b9dd6e5d29678ce37fbaaf65dfe132e5b82a794413302ef1"}, - {file = "hiredis-2.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb98038ccd368e0d88bd92ee575c58cfaf33e77f788c36b2a89a84ee1936dc6b"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:eae62ed60d53b3561148bcd8c2383e430af38c0deab9f2dd15f8874888ffd26f"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca33c175c1cf60222d9c6d01c38fc17ec3a484f32294af781de30226b003e00f"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c5f6972d2bdee3cd301d5c5438e31195cf1cabf6fd9274491674d4ceb46914d"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a6b54dabfaa5dbaa92f796f0c32819b4636e66aa8e9106c3d421624bd2a2d676"}, - {file = "hiredis-2.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e96cd35df012a17c87ae276196ea8f215e77d6eeca90709eb03999e2d5e3fd8a"}, - {file = "hiredis-2.3.2-cp39-cp39-win32.whl", hash = "sha256:63b99b5ea9fe4f21469fb06a16ca5244307678636f11917359e3223aaeca0b67"}, - {file = "hiredis-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a50c8af811b35b8a43b1590cf890b61ff2233225257a3cad32f43b3ec7ff1b9f"}, - 
{file = "hiredis-2.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e8bf4444b09419b77ce671088db9f875b26720b5872d97778e2545cd87dba4a"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd42d0d45ea47a2f96babd82a659fbc60612ab9423a68e4a8191e538b85542a"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80441b55edbef868e2563842f5030982b04349408396e5ac2b32025fb06b5212"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec444ab8f27562a363672d6a7372bc0700a1bdc9764563c57c5f9efa0e592b5f"}, - {file = "hiredis-2.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f9f606e810858207d4b4287b4ef0dc622c2aa469548bf02b59dcc616f134f811"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c3dde4ca00fe9eee3b76209711f1941bb86db42b8a75d7f2249ff9dfc026ab0e"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4dd676107a1d3c724a56a9d9db38166ad4cf44f924ee701414751bd18a784a0"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce42649e2676ad783186264d5ffc788a7612ecd7f9effb62d51c30d413a3eefe"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e3f8b1733078ac663dad57e20060e16389a60ab542f18a97931f3a2a2dd64a4"}, - {file = "hiredis-2.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:532a84a82156a82529ec401d1c25d677c6543c791e54a263aa139541c363995f"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d59f88c4daa36b8c38e59ac7bffed6f5d7f68eaccad471484bf587b28ccc478"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91a14dd95e24dc078204b18b0199226ee44644974c645dc54ee7b00c3157330"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb777a38797c8c7df0444533119570be18d1a4ce5478dffc00c875684df7bfcb"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d47c915897a99d0d34a39fad4be97b4b709ab3d0d3b779ebccf2b6024a8c681e"}, - {file = "hiredis-2.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:333b5e04866758b11bda5f5315b4e671d15755fc6ed3b7969721bc6311d0ee36"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c8937f1100435698c18e4da086968c4b5d70e86ea718376f833475ab3277c9aa"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa45f7d771094b8145af10db74704ab0f698adb682fbf3721d8090f90e42cc49"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d5ebc93c39aed4b5bc769f8ce0819bc50e74bb95d57a35f838f1c4378978e0"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a797d8c7df9944314d309b0d9e1b354e2fa4430a05bb7604da13b6ad291bf959"}, - {file = "hiredis-2.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e15a408f71a6c8c87b364f1f15a6cd9c1baca12bbc47a326ac8ab99ec7ad3c64"}, - {file = "hiredis-2.3.2.tar.gz", hash = "sha256:733e2456b68f3f126ddaf2cd500a33b25146c3676b97ea843665717bda0c5d43"}, + {file = 
"hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"}, + {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"}, + {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"}, + {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"}, + {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"}, + {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"}, + {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"}, + {file 
= "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"}, + {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"}, + {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"}, + {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"}, + {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"}, + {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"}, ] [[package]] @@ -2013,17 +1998,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "24.1.0" +version = "24.2.1" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.7" files = [ - {file = "pyOpenSSL-24.1.0-py3-none-any.whl", hash = "sha256:17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad"}, - {file = "pyOpenSSL-24.1.0.tar.gz", hash = "sha256:cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f"}, + {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"}, + {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"}, ] [package.dependencies] -cryptography = ">=41.0.5,<43" +cryptography = ">=41.0.5,<44" [package.extras] docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"] @@ -2373,29 +2358,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.0" +version = "0.5.4" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"}, - {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"}, - {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"}, - {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"}, - {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"}, - {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"}, - {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"}, - {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"}, - {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"}, + {file = "ruff-0.5.4-py3-none-linux_armv6l.whl", hash = "sha256:82acef724fc639699b4d3177ed5cc14c2a5aacd92edd578a9e846d5b5ec18ddf"}, + {file = "ruff-0.5.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:da62e87637c8838b325e65beee485f71eb36202ce8e3cdbc24b9fcb8b99a37be"}, + {file = "ruff-0.5.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e98ad088edfe2f3b85a925ee96da652028f093d6b9b56b76fc242d8abb8e2059"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c55efbecc3152d614cfe6c2247a3054cfe358cefbf794f8c79c8575456efe19"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9b85eaa1f653abd0a70603b8b7008d9e00c9fa1bbd0bf40dad3f0c0bdd06793"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cf497a47751be8c883059c4613ba2f50dd06ec672692de2811f039432875278"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:09c14ed6a72af9ccc8d2e313d7acf7037f0faff43cde4b507e66f14e812e37f7"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:628f6b8f97b8bad2490240aa84f3e68f390e13fabc9af5c0d3b96b485921cd60"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3520a00c0563d7a7a7c324ad7e2cde2355733dafa9592c671fb2e9e3cd8194c1"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93789f14ca2244fb91ed481456f6d0bb8af1f75a330e133b67d08f06ad85b516"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:029454e2824eafa25b9df46882f7f7844d36fd8ce51c1b7f6d97e2615a57bbcc"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9492320eed573a13a0bc09a2957f17aa733fff9ce5bf00e66e6d4a88ec33813f"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6e1f62a92c645e2919b65c02e79d1f61e78a58eddaebca6c23659e7c7cb4ac7"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:768fa9208df2bec4b2ce61dbc7c2ddd6b1be9fb48f1f8d3b78b3332c7d71c1ff"}, + {file = "ruff-0.5.4-py3-none-win32.whl", hash = "sha256:e1e7393e9c56128e870b233c82ceb42164966f25b30f68acbb24ed69ce9c3a4e"}, + {file = "ruff-0.5.4-py3-none-win_amd64.whl", hash = "sha256:58b54459221fd3f661a7329f177f091eb35cf7a603f01d9eb3eb11cc348d38c4"}, + {file = "ruff-0.5.4-py3-none-win_arm64.whl", hash = "sha256:bd53da65f1085fb5b307c38fd3c0829e76acf7b2a912d8d79cadcdb4875c1eb7"}, + {file = "ruff-0.5.4.tar.gz", hash = "sha256:2795726d5f71c4f4e70653273d1c23a8182f07dd8e48c12de5d867bfb7557eed"}, ] [[package]] @@ -2430,13 +2415,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] 
name = "sentry-sdk" -version = "2.8.0" +version = "2.10.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"}, - {file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"}, + {file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"}, + {file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"}, ] [package.dependencies] @@ -3230,4 +3215,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d" +content-hash = "e65fbd044230964cae8810c84289bcf0bc43b27532ea5a5ef8843eab4f6514af" diff --git a/pyproject.toml b/pyproject.toml index 1f9ee2b944..1adf8e087f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.111.0" +version = "1.112.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "AGPL-3.0-or-later" @@ -322,7 +322,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.5.0" +ruff = "0.5.4" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" diff --git a/synapse/config/server.py b/synapse/config/server.py index 8bb97df175..fd52c0475c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -384,6 +384,11 @@ class ServerConfig(Config): # Whether to internally track presence, requires that presence is enabled, self.track_presence = self.presence_enabled and presence_enabled != "untracked" + # Determines if presence results for offline users are included on initial/full sync + self.presence_include_offline_users_on_sync = presence_config.get( + "include_offline_users_on_sync", False + ) + # Custom presence router module # This is the legacy way of configuring it (the config should now be put in the modules section) self.presence_router_module_class = None diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 0432d97109..4fc6fcd7ae 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -39,6 +39,7 @@ from synapse.metrics.background_process_metrics import ( ) from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo from synapse.types import ( + DeviceListUpdates, JsonDict, JsonMapping, ScheduledTask, @@ -214,7 +215,7 @@ class DeviceWorkerHandler: @cancellable async def get_user_ids_changed( self, user_id: str, from_token: StreamToken - ) -> JsonDict: + ) -> DeviceListUpdates: """Get list of users that have had the devices updated, or have newly joined a room, that `user_id` may be interested in. 
""" @@ -341,11 +342,19 @@ class DeviceWorkerHandler: possibly_joined = set() possibly_left = set() - result = {"changed": list(possibly_joined), "left": list(possibly_left)} + device_list_updates = DeviceListUpdates( + changed=possibly_joined, + left=possibly_left, + ) - log_kv(result) + log_kv( + { + "changed": device_list_updates.changed, + "left": device_list_updates.left, + } + ) - return result + return device_list_updates async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: if not self.hs.is_mine(UserID.from_string(user_id)): diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py index b07b62a8fc..9f8fc6013b 100644 --- a/synapse/handlers/sliding_sync.py +++ b/synapse/handlers/sliding_sync.py @@ -20,7 +20,18 @@ import logging from enum import Enum from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, Final, List, Mapping, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Final, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, +) import attr from immutabledict import immutabledict @@ -35,6 +46,7 @@ from synapse.storage.databases.main.roommember import extract_heroes_from_room_s from synapse.storage.databases.main.stream import CurrentStateDeltaMembership from synapse.storage.roommember import MemberSummary from synapse.types import ( + DeviceListUpdates, JsonDict, PersistedEventPosition, Requester, @@ -333,7 +345,8 @@ class StateValues: # `sender` in the timeline). We only give special meaning to this value when it's a # `state_key`. LAZY: Final = "$LAZY" - + # Subsitute with the requester's user ID. Typically used by clients to get + # the user's membership. ME: Final = "$ME" @@ -346,6 +359,7 @@ class SlidingSyncHandler: self.notifier = hs.get_notifier() self.event_sources = hs.get_event_sources() self.relations_handler = hs.get_relations_handler() + self.device_handler = hs.get_device_handler() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.connection_store = SlidingSyncConnectionStore() @@ -376,10 +390,6 @@ class SlidingSyncHandler: # auth_blocking will occur) await self.auth_blocking.check_auth_blocking(requester=requester) - # TODO: If the To-Device extension is enabled and we have a `from_token`, delete - # any to-device messages before that token (since we now know that the device - # has received them). 
(see sync v2 for how to do this) - # If we're working with a user-provided token, we need to make sure to wait for # this worker to catch up with the token so we don't skip past any incoming # events or future events if the user is nefariously, manually modifying the @@ -459,8 +469,7 @@ class SlidingSyncHandler: raise NotImplementedError() await self.connection_store.mark_token_seen( - user_id, - conn_id=sync_config.connection_id(), + sync_config=sync_config, from_token=from_token, ) @@ -617,9 +626,8 @@ class SlidingSyncHandler: rooms_should_send = set() for room_id in relevant_room_map: status = await self.connection_store.have_sent_room( - user_id, - sync_config.connection_id(), - from_token.connection_token, + sync_config, + from_token.connection_position, room_id, ) if status.status != HaveSentRoomFlag.LIVE: @@ -659,20 +667,23 @@ class SlidingSyncHandler: await concurrently_execute(handle_room, relevant_room_map, 10) extensions = await self.get_extensions_response( - sync_config=sync_config, to_token=to_token + sync_config=sync_config, + from_token=from_token, + to_token=to_token, ) if has_lists or has_room_subscriptions: connection_token = await self.connection_store.record_rooms( - user_id, - conn_id=sync_config.connection_id(), + sync_config=sync_config, from_token=from_token, sent_room_ids=relevant_room_map.keys(), - unsent_room_ids=[], # TODO: We currently ssume that we have sent down all updates. + # TODO: We need to calculate which rooms have had updates since the `from_token` but were not included in the `sent_room_ids` + unsent_room_ids=[], ) elif from_token: - connection_token = from_token.connection_token + connection_token = from_token.connection_position else: + # Initial sync without a `from_token` starts at `0` connection_token = 0 return SlidingSyncResult( @@ -1277,18 +1288,18 @@ class SlidingSyncHandler: last_activity_in_room_map: Dict[str, int] = {} for room_id, room_for_user in sync_room_map.items(): - # If they are fully-joined to the room, let's find the latest activity - # at/before the `to_token`. if room_for_user.membership != Membership.JOIN: - # Otherwise, if the user has left/been invited/knocked/been banned from - # a room, they shouldn't see anything past that point. + # If the user has left/been invited/knocked/been banned from a + # room, they shouldn't see anything past that point. # - # FIXME: It's possible that people should see beyond this point in - # invited/knocked cases if for example the room has + # FIXME: It's possible that people should see beyond this point + # in invited/knocked cases if for example the room has # `invite`/`world_readable` history visibility, see # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 last_activity_in_room_map[room_id] = room_for_user.event_pos.stream + # For fully-joined rooms, we find the latest activity at/before the + # `to_token`. 
joined_room_positions = ( await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( [ @@ -1441,24 +1452,23 @@ class SlidingSyncHandler: # - When users `newly_joined` # - For an incremental sync where we haven't sent it down this # connection before - to_bound = None + from_bound = None initial = True if from_token and not room_membership_for_user_at_to_token.newly_joined: room_status = await self.connection_store.have_sent_room( - user_id=user.to_string(), - conn_id=sync_config.connection_id(), - connection_token=from_token.connection_token, + sync_config=sync_config, + connection_token=from_token.connection_position, room_id=room_id, ) if room_status.status == HaveSentRoomFlag.LIVE: - to_bound = from_token.stream_token.room_key + from_bound = from_token.stream_token.room_key initial = False elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: assert room_status.last_token is not None - to_bound = room_status.last_token + from_bound = room_status.last_token initial = False elif room_status.status == HaveSentRoomFlag.NEVER: - to_bound = None + from_bound = None initial = True else: assert_never(room_status.status) @@ -1488,13 +1498,13 @@ class SlidingSyncHandler: prev_batch_token = to_token # We're going to paginate backwards from the `to_token` - from_bound = to_token.room_key + to_bound = to_token.room_key # People shouldn't see past their leave/ban event if room_membership_for_user_at_to_token.membership in ( Membership.LEAVE, Membership.BAN, ): - from_bound = ( + to_bound = ( room_membership_for_user_at_to_token.event_pos.to_room_stream_token() ) @@ -1504,8 +1514,11 @@ class SlidingSyncHandler: timeline_events, new_room_key = await self.store.paginate_room_events( room_id=room_id, - from_key=from_bound, - to_key=to_bound, + # The bounds are reversed so we can paginate backwards + # (from newer to older events) starting at to_bound. + # This ensures we fill the `limit` with the newest events first, + from_key=to_bound, + to_key=from_bound, direction=Direction.BACKWARDS, # We add one so we can determine if there are enough events to saturate # the limit or not (see `limited`) @@ -1735,13 +1748,8 @@ class SlidingSyncHandler: # FIXME: We probably also care about invite, ban, kick, targets, etc # but the spec only mentions "senders". - elif ( - state_type == EventTypes.Member - and state_key == StateValues.ME - ): - required_state_types.append( - (EventTypes.Member, user.to_string()) - ) + elif state_key == StateValues.ME: + required_state_types.append((state_type, user.to_string())) else: required_state_types.append((state_type, state_key)) @@ -1772,10 +1780,12 @@ class SlidingSyncHandler: to_token=to_token, ) else: - assert to_bound is not None + assert from_bound is not None deltas = await self.store.get_current_state_deltas_for_room( - room_id, to_bound, to_token.room_key + room_id=room_id, + from_token=from_bound, + to_token=to_token.room_key, ) # TODO: Filter room state before fetching events # TODO: Handle state resets where event_id is None @@ -1874,33 +1884,47 @@ class SlidingSyncHandler: self, sync_config: SlidingSyncConfig, to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken], ) -> SlidingSyncResult.Extensions: """Handle extension requests. Args: sync_config: Sync configuration to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
""" if sync_config.extensions is None: return SlidingSyncResult.Extensions() to_device_response = None - if sync_config.extensions.to_device: - to_device_response = await self.get_to_device_extensions_response( + if sync_config.extensions.to_device is not None: + to_device_response = await self.get_to_device_extension_response( sync_config=sync_config, to_device_request=sync_config.extensions.to_device, to_token=to_token, ) - return SlidingSyncResult.Extensions(to_device=to_device_response) + e2ee_response = None + if sync_config.extensions.e2ee is not None: + e2ee_response = await self.get_e2ee_extension_response( + sync_config=sync_config, + e2ee_request=sync_config.extensions.e2ee, + to_token=to_token, + from_token=from_token, + ) - async def get_to_device_extensions_response( + return SlidingSyncResult.Extensions( + to_device=to_device_response, + e2ee=e2ee_response, + ) + + async def get_to_device_extension_response( self, sync_config: SlidingSyncConfig, to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, to_token: StreamToken, - ) -> SlidingSyncResult.Extensions.ToDeviceExtension: + ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: """Handle to-device extension (MSC3885) Args: @@ -1908,14 +1932,16 @@ class SlidingSyncHandler: to_device_request: The to-device extension from the request to_token: The point in the stream to sync up to. """ - user_id = sync_config.user.to_string() device_id = sync_config.requester.device_id + # Skip if the extension is not enabled + if not to_device_request.enabled: + return None + # Check that this request has a valid device ID (not all requests have - # to belong to a device, and so device_id is None), and that the - # extension is enabled. - if device_id is None or not to_device_request.enabled: + # to belong to a device, and so device_id is None) + if device_id is None: return SlidingSyncResult.Extensions.ToDeviceExtension( next_batch=f"{to_token.to_device_key}", events=[], @@ -1968,6 +1994,56 @@ class SlidingSyncHandler: events=messages, ) + async def get_e2ee_extension_response( + self, + sync_config: SlidingSyncConfig, + e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, + to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken], + ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: + """Handle E2EE device extension (MSC3884) + + Args: + sync_config: Sync configuration + e2ee_request: The e2ee extension from the request + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
+ """ + user_id = sync_config.user.to_string() + device_id = sync_config.requester.device_id + + # Skip if the extension is not enabled + if not e2ee_request.enabled: + return None + + device_list_updates: Optional[DeviceListUpdates] = None + if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id=user_id, + from_token=from_token.stream_token, + ) + + device_one_time_keys_count: Mapping[str, int] = {} + device_unused_fallback_key_types: Sequence[str] = [] + if device_id: + # TODO: We should have a way to let clients differentiate between the states of: + # * no change in OTK count since the provided since token + # * the server has zero OTKs left for this device + # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 + device_one_time_keys_count = await self.store.count_e2e_one_time_keys( + user_id, device_id + ) + device_unused_fallback_key_types = ( + await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + + return SlidingSyncResult.Extensions.E2eeExtension( + device_list_updates=device_list_updates, + device_one_time_keys_count=device_one_time_keys_count, + device_unused_fallback_key_types=device_unused_fallback_key_types, + ) + class HaveSentRoomFlag(Enum): """Flag for whether we have sent the room down a sliding sync connection. @@ -2047,13 +2123,14 @@ class SlidingSyncConnectionStore: ) async def have_sent_room( - self, user_id: str, conn_id: str, connection_token: int, room_id: str + self, sync_config: SlidingSyncConfig, connection_token: int, room_id: str ) -> HaveSentRoom: - """Whether for the given user_id/conn_id/token, return whether we have + """For the given user_id/conn_id/token, return whether we have previously sent the room down """ - sync_statuses = self._connections.setdefault((user_id, conn_id), {}) + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.setdefault(conn_key, {}) room_status = sync_statuses.get(connection_token, {}).get( room_id, HAVE_SENT_ROOM_NEVER ) @@ -2062,8 +2139,7 @@ class SlidingSyncConnectionStore: async def record_rooms( self, - user_id: str, - conn_id: str, + sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken], *, sent_room_ids: StrCollection, @@ -2072,25 +2148,25 @@ class SlidingSyncConnectionStore: """Record which rooms we have/haven't sent down in a new response Attributes: - user_id - conn_id + sync_config from_token: The since token from the request, if any sent_room_ids: The set of room IDs that we have sent down as part of this request (only needs to be ones we didn't previously sent down). unsent_room_ids: The set of room IDs that have had updates - since the `last_room_token`, but which were not included in + since the `from_token`, but which were not included in this request """ prev_connection_token = 0 if from_token is not None: - prev_connection_token = from_token.connection_token + prev_connection_token = from_token.connection_position # If there are no changes then this is a noop. if not sent_room_ids and not unsent_room_ids: return prev_connection_token - sync_statuses = self._connections.setdefault((user_id, conn_id), {}) + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.setdefault(conn_key, {}) # Generate a new token, removing any existing entries in that token # (which can happen if requests get resent). 
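
The `SlidingSyncConnectionStore` hunks above replace the raw `user_id`/`conn_id` arguments with the full `SlidingSyncConfig` (from which the store now derives its own connection key) and treat the integer token as a per-connection position. As a rough, self-contained sketch of the bookkeeping pattern involved (an illustrative simplification with hypothetical names such as `ToyConnectionStore`; it is not the code in this patch and it omits the `PREVIOUSLY`/`last_token` and unsent-room handling that the real store does):

    from enum import Enum, auto
    from typing import Dict, Iterable, Optional, Tuple

    class Flag(Enum):
        LIVE = auto()        # room fully sent down, up to the recorded position
        PREVIOUSLY = auto()  # sent before, but some updates were skipped
        NEVER = auto()       # never sent down this connection

    ConnKey = Tuple[str, str]  # (user_id, scoped connection identifier)

    class ToyConnectionStore:
        """Toy model of per-connection room tracking (illustrative only)."""

        def __init__(self) -> None:
            # conn_key -> {connection_position -> {room_id -> Flag}}
            self._connections: Dict[ConnKey, Dict[int, Dict[str, Flag]]] = {}

        @staticmethod
        def connection_key(
            user_id: str,
            device_id: Optional[str],
            access_token_id: Optional[int],
            conn_id: str = "",
        ) -> ConnKey:
            # Scope `conn_id` by device (or access token) so two different
            # clients that both use e.g. `conn_id: main_sync_loop` don't clash.
            if device_id:
                return (user_id, f"D/{device_id}/{conn_id}")
            if access_token_id:
                return (user_id, f"A/{access_token_id}/{conn_id}")
            raise Exception("Cannot use sliding sync with access token type")

        def have_sent_room(
            self, conn_key: ConnKey, connection_position: int, room_id: str
        ) -> Flag:
            per_conn = self._connections.setdefault(conn_key, {})
            return per_conn.get(connection_position, {}).get(room_id, Flag.NEVER)

        def record_rooms(
            self, conn_key: ConnKey, prev_position: int, sent_room_ids: Iterable[str]
        ) -> int:
            # Copy the snapshot at the position the client echoed back, mark the
            # rooms sent in this response as LIVE, and store the result under a
            # fresh position for the client to echo back next time.
            per_conn = self._connections.setdefault(conn_key, {})
            new_position = prev_position + 1
            snapshot = dict(per_conn.get(prev_position, {}))
            for room_id in sent_room_ids:
                snapshot[room_id] = Flag.LIVE
            per_conn[new_position] = snapshot
            return new_position

The returned integer is what becomes the first half of the sliding sync `pos` token (`<connection_position>/<stream_token>`, e.g. `5/s2633508_17_...` as described in the `SlidingSyncStreamToken` docstring further down), which is why the later hunks rename `connection_token` to `connection_position`.
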
@@ -2138,8 +2214,7 @@ class SlidingSyncConnectionStore: async def mark_token_seen( self, - user_id: str, - conn_id: str, + sync_config: SlidingSyncConfig, from_token: Optional[SlidingSyncStreamToken], ) -> None: """We have received a request with the given token, so we can clear out @@ -2151,14 +2226,49 @@ class SlidingSyncConnectionStore: # Clear out any tokens for the connection that doesn't match the one # from the request. - sync_statuses = self._connections.pop((user_id, conn_id), {}) + conn_key = self._get_connection_key(sync_config) + sync_statuses = self._connections.pop(conn_key, {}) if from_token is None: return sync_statuses = { - i: room_statuses - for i, room_statuses in sync_statuses.items() - if i == from_token.connection_token + connection_token: room_statuses + for connection_token, room_statuses in sync_statuses.items() + if connection_token == from_token.connection_position } if sync_statuses: - self._connections[(user_id, conn_id)] = sync_statuses + self._connections[conn_key] = sync_statuses + + @staticmethod + def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]: + """Return a unique identifier for this connection. + + The first part is simply the user ID. + + The second part is generally a combination of device ID and conn_id. + However, both these two are optional (e.g. puppet access tokens don't + have device IDs), so this handles those edge cases. + + We use this over the raw `conn_id` to avoid clashes between different + clients that use the same `conn_id`. Imagine a user uses a web client + that uses `conn_id: main_sync_loop` and an Android client that also has + a `conn_id: main_sync_loop`. + """ + + user_id = sync_config.user.to_string() + + # If this is missing, only one sliding sync connection is allowed per + # given conn_id. + conn_id = sync_config.conn_id or "" + + if sync_config.requester.device_id: + return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}") + + if sync_config.requester.access_token_id: + # If we don't have a device, then the access token ID should be a + # stable ID. + return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}") + + # If we have neither then its likely an AS or some weird token. Either + # way we can just fail here. 
+ raise Exception("Cannot use sliding sync with access token type") diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index fefc35ecdb..f66db4df78 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -2270,7 +2270,11 @@ class SyncHandler: user=user, from_key=presence_key, is_guest=sync_config.is_guest, - include_offline=include_offline, + include_offline=( + True + if self.hs_config.server.presence_include_offline_users_on_sync + else include_offline + ), ) assert presence_key sync_result_builder.now_token = now_token.copy_and_replace( diff --git a/synapse/notifier.py b/synapse/notifier.py index c3ecf86ec4..7a2b54036c 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -773,6 +773,7 @@ class Notifier: stream_token = await self.event_sources.bound_future_token(stream_token) start = self.clock.time_msec() + logged = False while True: current_token = self.event_sources.get_current_token() if stream_token.is_before_or_eq(current_token): @@ -783,11 +784,13 @@ class Notifier: if now - start > 10_000: return False - logger.info( - "Waiting for current token to reach %s; currently at %s", - stream_token, - current_token, - ) + if not logged: + logger.info( + "Waiting for current token to reach %s; currently at %s", + stream_token, + current_token, + ) + logged = True # TODO: be better await self.clock.sleep(0.5) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 67de634eab..eddad7d5b8 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -256,9 +256,15 @@ class KeyChangesServlet(RestServlet): user_id = requester.user.to_string() - results = await self.device_handler.get_user_ids_changed(user_id, from_token) + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id, from_token + ) + + response: JsonDict = {} + response["changed"] = list(device_list_updates.changed) + response["left"] = list(device_list_updates.left) - return 200, results + return 200, response class OneTimeKeyServlet(RestServlet): diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 7c91b15cef..3eb9aa5a1f 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -1087,15 +1087,41 @@ class SlidingSyncRestServlet(RestServlet): async def encode_extensions( self, requester: Requester, extensions: SlidingSyncResult.Extensions ) -> JsonDict: - result = {} + serialized_extensions: JsonDict = {} if extensions.to_device is not None: - result["to_device"] = { + serialized_extensions["to_device"] = { "next_batch": extensions.to_device.next_batch, "events": extensions.to_device.events, } - return result + if extensions.e2ee is not None: + serialized_extensions["e2ee"] = { + # We always include this because + # https://github.com/vector-im/element-android/issues/3725. The spec + # isn't terribly clear on when this can be omitted and how a client + # would tell the difference between "no keys present" and "nothing + # changed" in terms of whole field absent / individual key type entry + # absent Corresponding synapse issue: + # https://github.com/matrix-org/synapse/issues/10456 + "device_one_time_keys_count": extensions.e2ee.device_one_time_keys_count, + # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md + # states that this field should always be included, as long as the + # server supports the feature. 
+ "device_unused_fallback_key_types": extensions.e2ee.device_unused_fallback_key_types, + } + + if extensions.e2ee.device_list_updates is not None: + serialized_extensions["e2ee"]["device_lists"] = {} + + serialized_extensions["e2ee"]["device_lists"]["changed"] = list( + extensions.e2ee.device_list_updates.changed + ) + serialized_extensions["e2ee"]["device_lists"]["left"] = list( + extensions.e2ee.device_list_updates.left + ) + + return serialized_extensions def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/synapse/server.py b/synapse/server.py index 4a3f9ff934..46b9d83a04 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -559,6 +559,7 @@ class HomeServer(metaclass=abc.ABCMeta): def get_sync_handler(self) -> SyncHandler: return SyncHandler(self) + @cache_in_self def get_sliding_sync_handler(self) -> SlidingSyncHandler: return SlidingSyncHandler(self) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 24abab4a23..715846865b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1313,6 +1313,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # We want to make the cache more effective, so we clamp to the last # change before the given ordering. last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # type: ignore[attr-defined] + if last_change is None: + # If the room isn't in the cache we know that the last change was + # somewhere before the earliest known position of the cache, so we + # can clamp to that. + last_change = self._events_stream_cache.get_earliest_known_position() # type: ignore[attr-defined] # We don't always have a full stream_to_exterm_id table, e.g. after # the upgrade that introduced it, so we make sure we never ask for a diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index cd6cb2c7a9..da3ebe66b8 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -162,8 +162,7 @@ class StateDeltasStore(SQLBaseStore): async def get_current_state_deltas_for_room( self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken ) -> List[StateDelta]: - """Get the state deltas between that have happened between two - tokens.""" + """Get the state deltas between two tokens.""" def get_current_state_deltas_for_room_txn( txn: LoggingTransaction, diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 7df811e451..395a1f46af 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1355,32 +1355,46 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): room_ids: StrCollection, end_token: RoomStreamToken, ) -> Dict[str, int]: - """Bulk fetch the latest event position in the given rooms""" + """Bulk fetch the stream position of the latest events in the given + rooms + """ min_token = end_token.stream max_token = end_token.get_max_stream_pos() results: Dict[str, int] = {} + # First, we check for the rooms in the stream change cache to see if we + # can just use the latest position from it. 
missing_room_ids: Set[str] = set() for room_id in room_ids: - stream_pos = self._events_stream_cache._entity_to_key.get(room_id) - if stream_pos and stream_pos < max_token: + stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id) + if stream_pos and stream_pos <= min_token: results[room_id] = stream_pos else: missing_room_ids.add(room_id) + # Next, we query the stream position from the DB. At first we fetch all + # positions less than the *max* stream pos in the token, then filter + # them down. We do this as a) this is a cheaper query, and b) the vast + # majority of rooms will have a latest token from before the min stream + # pos. + def bulk_get_last_event_pos_txn( txn: LoggingTransaction, batch_room_ids: StrCollection ) -> Dict[str, int]: + # This query fetches the latest stream position in the rooms before + # the given max position. clause, args = make_in_list_sql_clause( self.database_engine, "room_id", batch_room_ids ) sql = f""" SELECT room_id, ( SELECT stream_ordering FROM events AS e + LEFT JOIN rejections USING (event_id) WHERE e.room_id = r.room_id AND stream_ordering <= ? AND NOT outlier + AND rejection_reason IS NULL ORDER BY stream_ordering DESC LIMIT 1 ) @@ -1398,15 +1412,25 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): batched, ) + # Check that the stream position for the rooms are from before the + # minimum position of the token. If not then we need to fetch more + # rows. for room_id, stream in result.items(): - if min_token < stream: - recheck_rooms.add(room_id) - else: + if stream <= min_token: results[room_id] = stream + else: + recheck_rooms.add(room_id) if not recheck_rooms: return results + # For the remaining rooms we need to fetch all rows between the min and + # max stream positions in the end token, and filter out the rows that + # are after the end token. + # + # This query should be fast as the range between the min and max should + # be small. + def bulk_get_last_event_pos_recheck_txn( txn: LoggingTransaction, batch_room_ids: StrCollection ) -> Dict[str, int]: @@ -1415,19 +1439,26 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) sql = f""" SELECT room_id, instance_name, stream_ordering + FROM events WHERE ? < stream_ordering AND stream_ordering <= ? + AND NOT outlier + AND rejection_reason IS NULL AND {clause} ORDER BY stream_ordering ASC """ txn.execute(sql, [min_token, max_token] + args) - results: Dict[str, int] = {} + + # We take the max stream ordering that is less than the token. Since + # we ordered by stream ordering we just need to iterate through and + # take the last matching stream ordering. 
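
To make the two-pass strategy described in the comments above concrete, here is a rough in-memory analogue of the filtering (illustrative only; `candidates`, `rows_in_window` and `is_after_end_token` are hypothetical stand-ins for the first query's per-room results, the recheck query's ascending rows, and the `persisted_after(end_token)` check respectively):

    from typing import Callable, Dict, Iterable, Set, Tuple

    def split_after_cheap_pass(
        candidates: Dict[str, int], min_token: int
    ) -> Tuple[Dict[str, int], Set[str]]:
        # Positions at or below the token's minimum stream position are safe to
        # use as-is; anything above it may have been persisted by another event
        # writer after the token, so those rooms need the recheck pass.
        confirmed = {room: pos for room, pos in candidates.items() if pos <= min_token}
        recheck = {room for room, pos in candidates.items() if pos > min_token}
        return confirmed, recheck

    def last_pos_from_recheck(
        rows_in_window: Iterable[Tuple[str, int]],  # (room_id, stream_ordering), ASC
        is_after_end_token: Callable[[str, int], bool],
    ) -> Dict[str, int]:
        # Rows arrive ordered by stream_ordering ascending, so letting later rows
        # overwrite earlier ones leaves the latest acceptable position per room.
        results: Dict[str, int] = {}
        for room_id, stream_ordering in rows_in_window:
            if not is_after_end_token(room_id, stream_ordering):
                results[room_id] = stream_ordering
        return results

In the actual change, the recheck only runs for the (typically few) rooms whose candidate position lands inside the `(min_token, max_token]` window, which keeps the second query cheap.
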
+ txn_results: Dict[str, int] = {} for row in txn: room_id = row[0] event_pos = PersistedEventPosition(row[1], row[2]) if not event_pos.persisted_after(end_token): - results[room_id] = event_pos.stream + txn_results[room_id] = event_pos.stream - return results + return txn_results for batched in batch_iter(recheck_rooms, 1000): recheck_result = await self.db_pool.runInteraction( diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 23ac1842f8..5259550f1c 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -777,6 +777,13 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return super().bound_stream_token(max_stream) + def __str__(self) -> str: + instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items())) + return ( + f"RoomStreamToken(stream: {self.stream}, topological: {self.topological}, " + f"instances: {{{instances}}})" + ) + @attr.s(frozen=True, slots=True, order=False) class MultiWriterStreamToken(AbstractMultiWriterStreamToken): @@ -873,6 +880,13 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken): return True + def __str__(self) -> str: + instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items())) + return ( + f"MultiWriterStreamToken(stream: {self.stream}, " + f"instances: {{{instances}}})" + ) + class StreamKeyType(Enum): """Known stream types. @@ -1131,6 +1145,15 @@ class StreamToken: return True + def __str__(self) -> str: + return ( + f"StreamToken(room: {self.room_key}, presence: {self.presence_key}, " + f"typing: {self.typing_key}, receipt: {self.receipt_key}, " + f"account_data: {self.account_data_key}, push_rules: {self.push_rules_key}, " + f"to_device: {self.to_device_key}, device_list: {self.device_list_key}, " + f"groups: {self.groups_key}, un_partial_stated_rooms: {self.un_partial_stated_rooms_key})" + ) + StreamToken.START = StreamToken( RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0 @@ -1145,23 +1168,29 @@ class SlidingSyncStreamToken: This then looks something like: 5/s2633508_17_338_6732159_1082514_541479_274711_265584_1_379 + + Attributes: + stream_token: Token representing the position of all the standard + streams. + connection_position: Token used by sliding sync to track updates to any + per-connection state stored by Synapse. 
""" stream_token: StreamToken - connection_token: int + connection_position: int @staticmethod @cancellable async def from_string(store: "DataStore", string: str) -> "SlidingSyncStreamToken": """Creates a SlidingSyncStreamToken from its textual representation.""" try: - connection_token_str, stream_token_str = string.split("/", 1) - connection_token = int(connection_token_str) + connection_position_str, stream_token_str = string.split("/", 1) + connection_position = int(connection_position_str) stream_token = await StreamToken.from_string(store, stream_token_str) return SlidingSyncStreamToken( stream_token=stream_token, - connection_token=connection_token, + connection_position=connection_position, ) except CancelledError: raise @@ -1171,7 +1200,7 @@ class SlidingSyncStreamToken: async def to_string(self, store: "DataStore") -> str: """Serializes the token to a string""" stream_token_str = await self.stream_token.to_string(store) - return f"{self.connection_token}/{stream_token_str}" + return f"{self.connection_position}/{stream_token_str}" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -1256,11 +1285,12 @@ class ReadReceipt: @attr.s(slots=True, frozen=True, auto_attribs=True) class DeviceListUpdates: """ - An object containing a diff of information regarding other users' device lists, intended for - a recipient to carry out device list tracking. + An object containing a diff of information regarding other users' device lists, + intended for a recipient to carry out device list tracking. Attributes: - changed: A set of users whose device lists have changed recently. + changed: A set of users who have updated their device identity or + cross-signing keys, or who now share an encrypted room with. left: A set of users who the recipient no longer needs to track the device lists of. Typically when those users no longer share any end-to-end encryption enabled rooms. """ diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py index 0c2ab13c93..7c7fe130cb 100644 --- a/synapse/types/handlers/__init__.py +++ b/synapse/types/handlers/__init__.py @@ -18,7 +18,7 @@ # # from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple +from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple import attr from typing_extensions import TypedDict @@ -32,6 +32,7 @@ else: from synapse.events import EventBase from synapse.types import ( + DeviceListUpdates, JsonDict, JsonMapping, Requester, @@ -120,31 +121,6 @@ class SlidingSyncConfig(SlidingSyncBody): # Allow custom types like `UserID` to be used in the model arbitrary_types_allowed = True - def connection_id(self) -> str: - """Return a string identifier for this connection. May clash with - connection IDs from different users. - - This is generally a combination of device ID and conn_id. However, both - these two are optional (e.g. puppet access tokens don't have device - IDs), so this handles those edge cases. - """ - - # `conn_id` can be null, in which case we default to the empty string - # (if conn ID is empty then the client can't have multiple sync loops) - conn_id = self.conn_id or "" - - if self.requester.device_id: - return f"D/{self.requester.device_id}/{conn_id}" - - if self.requester.access_token_id: - # If we don't have a device, then the access token ID should be a - # stable ID. - return f"A/{self.requester.access_token_id}/{conn_id}" - - # If we have neither then its likely an AS or some weird token. Either - # way we can just fail here. 
- raise Exception("Cannot use sliding sync with access token type") - class OperationType(Enum): """ @@ -296,6 +272,7 @@ class SlidingSyncResult: Attributes: to_device: The to-device extension (MSC3885) + e2ee: The E2EE device extension (MSC3884) """ @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -314,10 +291,51 @@ class SlidingSyncResult: def __bool__(self) -> bool: return bool(self.events) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class E2eeExtension: + """The E2EE device extension (MSC3884) + + Attributes: + device_list_updates: List of user_ids whose devices have changed or left (only + present on incremental syncs). + device_one_time_keys_count: Map from key algorithm to the number of + unclaimed one-time keys currently held on the server for this device. If + an algorithm is unlisted, the count for that algorithm is assumed to be + zero. If this entire parameter is missing, the count for all algorithms + is assumed to be zero. + device_unused_fallback_key_types: List of unused fallback key algorithms + for this device. + """ + + # Only present on incremental syncs + device_list_updates: Optional[DeviceListUpdates] + device_one_time_keys_count: Mapping[str, int] + device_unused_fallback_key_types: Sequence[str] + + def __bool__(self) -> bool: + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + default_otk = self.device_one_time_keys_count.get("signed_curve25519") + more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( + default_otk is not None and default_otk > 0 + ) + + return bool( + more_than_default_otk + or self.device_list_updates + or self.device_unused_fallback_key_types + ) + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None def __bool__(self) -> bool: - return bool(self.to_device) + return bool(self.to_device or self.e2ee) next_pos: SlidingSyncStreamToken lists: Dict[str, SlidingWindowList] diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py index 5be8cf5389..511e6e2d9f 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py @@ -121,8 +121,7 @@ class SlidingSyncBody(RequestBodyModel): Attributes: conn_id: An optional string to identify this connection to the server. If this - is missing, only 1 sliding sync connection can be made to the server at - any one time. + is missing, only one sliding sync connection is allowed per given conn_id. lists: Sliding window API. A map of list key to list information (:class:`SlidingSyncList`). Max lists: 100. The list keys should be arbitrary strings which the client is using to refer to the list. 
Keep this @@ -316,7 +315,17 @@ class SlidingSyncBody(RequestBodyModel): return value + class E2eeExtension(RequestBodyModel): + """The E2EE device extension (MSC3884) + + Attributes: + enabled + """ + + enabled: Optional[StrictBool] = False + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None conn_id: Optional[str] diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 91c335f85b..16fcb00206 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -327,7 +327,7 @@ class StreamChangeCache: for entity in r: self._entity_to_key.pop(entity, None) - def get_max_pos_of_last_change(self, entity: EntityType) -> int: + def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]: """Returns an upper bound of the stream id of the last change to an entity. @@ -335,7 +335,11 @@ class StreamChangeCache: entity: The entity to check. Return: - The stream position of the latest change for the given entity or - the earliest known stream position if the entitiy is unknown. + The stream position of the latest change for the given entity, if + known """ - return self._entity_to_key.get(entity, self._earliest_known_stream_pos) + return self._entity_to_key.get(entity) + + def get_earliest_known_position(self) -> int: + """Returns the earliest position in the cache.""" + return self._earliest_known_stream_pos diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index a7aa9bb8af..5e91be0026 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -19,7 +19,7 @@ # import logging from copy import deepcopy -from typing import Dict, Optional +from typing import Dict, Optional, Tuple from unittest.mock import patch from parameterized import parameterized @@ -46,7 +46,13 @@ from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import JsonDict, StreamToken, UserID +from synapse.types import ( + JsonDict, + SlidingSyncStreamToken, + StreamToken, + UserID, + create_requester, +) from synapse.types.handlers import SlidingSyncConfig from synapse.util import Clock @@ -3759,3 +3765,390 @@ class SortRoomsTestCase(HomeserverTestCase): # We only care about the *latest* event in the room. 
[room_id1, room_id2], ) + + +class GetRoomSyncDataTestCase(HomeserverTestCase): + """ + Tests for Sliding Sync handler `get_room_sync_data()` + """ + + servlets = [ + admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sliding_sync_handler = self.hs.get_sliding_sync_handler() + self.store = self.hs.get_datastores().main + self.event_sources = hs.get_event_sources() + + def _create_sync_configs( + self, + user_id: str, + room_id: str, + timeline_limit: int = 5, + ) -> Tuple[SlidingSyncConfig, RoomSyncConfig, _RoomMembershipForUser]: + """Create the configs necessary to call `get_room_sync_data`""" + requester = create_requester(user_id, device_id="foo_device") + sync_config = SlidingSyncConfig( + user=requester.user, + requester=requester, + conn_id="conn_id", + lists={ + "list": SlidingSyncConfig.SlidingSyncList( + timeline_limit=timeline_limit, + required_state=[ + (EventTypes.Name, ""), + ], + ), + }, + room_subscriptions={}, + extensions=None, + ) + + room_sync_config = RoomSyncConfig(timeline_limit, {EventTypes.Name: {""}}) + + rooms = self.get_success( + self.store.get_rooms_for_local_user_where_membership_is( + user_id, membership_list=[Membership.JOIN] + ) + ) + room_for_user = rooms[0] + assert room_for_user.room_id == room_id + + room_membership_for_user_at_to_token = _RoomMembershipForUser( + room_id=room_id, + event_id=room_for_user.event_id, + event_pos=room_for_user.event_pos, + membership=Membership.JOIN, + sender=user_id, + newly_joined=False, + newly_left=False, + is_dm=False, + ) + + return (sync_config, room_sync_config, room_membership_for_user_at_to_token) + + def test_room_sync_data_initial(self) -> None: + """Tests getting room sync data with no from token""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + sync_config, room_sync_config, room_membership_for_user_at_to_token = ( + self._create_sync_configs(user1_id, room_id1, timeline_limit=5) + ) + + # Timeline limit is 5 so let's send 5 messages that we'll expect to get + # back. 
+ expected_timeline = [] + for _ in range(5): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + to_token = self.event_sources.get_current_token() + + result = self.get_success( + self.sliding_sync_handler.get_room_sync_data( + sync_config, + room_id=room_id1, + room_sync_config=room_sync_config, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + from_token=None, + to_token=to_token, + ) + ) + + self.assertTrue(result.initial) + self.assertTrue(result.limited) + self.assertEqual( + [e.event_id for e in result.timeline_events], expected_timeline + ) + + def test_room_sync_data_incremental_live(self) -> None: + """Test getting room data where we have previously sent down the room + and its state is considered LIVE""" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + sync_config, room_sync_config, room_membership_for_user_at_to_token = ( + self._create_sync_configs(user1_id, room_id1) + ) + + # These messages are sent before the `from_token`, so we don't expect to + # see these messages. + for _ in range(5): + r = self.helper.send(room_id1, "message", tok=user1_tok) + + # Mark the room as having been sent down, and create an appropriate + # `from_token`. + connection_token = self.get_success( + self.sliding_sync_handler.connection_store.record_rooms( + sync_config, None, sent_room_ids=[room_id1], unsent_room_ids=[] + ) + ) + from_token = SlidingSyncStreamToken( + self.event_sources.get_current_token(), connection_token + ) + + # These messages are sent after the `from_token`, so we expect to only + # see these messages. + expected_timeline = [] + for _ in range(2): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + to_token = self.event_sources.get_current_token() + + result = self.get_success( + self.sliding_sync_handler.get_room_sync_data( + sync_config, + room_id=room_id1, + room_sync_config=room_sync_config, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + from_token=from_token, + to_token=to_token, + ) + ) + + self.assertFalse(result.initial) + self.assertFalse(result.limited) + self.assertEqual( + [e.event_id for e in result.timeline_events], expected_timeline + ) + + def test_room_sync_data_incremental_previously_not_limited(self) -> None: + """Test getting room data where we have previously sent down the room, + but we missed sending down some data previously and so its state is + considered PREVIOUSLY. + + In this case there has been more than timeline limit events. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + sync_config, room_sync_config, room_membership_for_user_at_to_token = ( + self._create_sync_configs(user1_id, room_id1) + ) + + # These messages are sent before the initial `from_token`, so we don't + # expect to see these messages. + for _ in range(5): + r = self.helper.send(room_id1, "message", tok=user1_tok) + + # Mark the room as having been sent down, and create an appropriate + # `from_token`. 
+ connection_token = self.get_success( + self.sliding_sync_handler.connection_store.record_rooms( + sync_config, None, sent_room_ids=[room_id1], unsent_room_ids=[] + ) + ) + from_token = SlidingSyncStreamToken( + self.event_sources.get_current_token(), connection_token + ) + + # These messages are sent after the initial `from_token`, so we expect + # to see these messages. + expected_timeline = [] + for _ in range(2): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + # Mark the room as *not* having been sent down, and create a new + # `from_token`. + connection_token = self.get_success( + self.sliding_sync_handler.connection_store.record_rooms( + sync_config, from_token, sent_room_ids=[], unsent_room_ids=[room_id1] + ) + ) + + from_token = SlidingSyncStreamToken( + self.event_sources.get_current_token(), connection_token + ) + + # We should also receive new messages + for _ in range(2): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + to_token = self.event_sources.get_current_token() + + result = self.get_success( + self.sliding_sync_handler.get_room_sync_data( + sync_config, + room_id=room_id1, + room_sync_config=room_sync_config, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + from_token=from_token, + to_token=to_token, + ) + ) + + self.assertFalse(result.initial) + self.assertFalse(result.limited) + self.assertEqual( + [e.event_id for e in result.timeline_events], expected_timeline + ) + + def test_room_sync_data_incremental_previously_limited(self) -> None: + """Test getting room data where we have previously sent down the room, + but we missed sending down some data previously and so its state is + considered PREVIOUSLY. + + In this case there has been fewer than timeline limit events. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + sync_config, room_sync_config, room_membership_for_user_at_to_token = ( + self._create_sync_configs(user1_id, room_id1) + ) + + # These messages are sent before the initial `from_token`, so we don't + # expect to see these messages. + for _ in range(5): + r = self.helper.send(room_id1, "message", tok=user1_tok) + + # Mark the room as having been sent down, and create an appropriate + # `from_token`. + connection_token = self.get_success( + self.sliding_sync_handler.connection_store.record_rooms( + sync_config, None, sent_room_ids=[room_id1], unsent_room_ids=[] + ) + ) + from_token = SlidingSyncStreamToken( + self.event_sources.get_current_token(), connection_token + ) + + # These messages are sent after the initial `from_token`, but are before + # the timeline limit, so we don't expect to see these messages. + for _ in range(5): + r = self.helper.send(room_id1, "message", tok=user1_tok) + + # ... but these messages are within the timeline limit, so we do expect + # to see them + expected_timeline = [] + for _ in range(3): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + # Mark the room as *not* having been sent down, and create a new + # `from_token`. 
+ connection_token = self.get_success( + self.sliding_sync_handler.connection_store.record_rooms( + sync_config, from_token, sent_room_ids=[], unsent_room_ids=[room_id1] + ) + ) + + from_token = SlidingSyncStreamToken( + self.event_sources.get_current_token(), connection_token + ) + + # We should also receive new messages + for _ in range(2): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + to_token = self.event_sources.get_current_token() + + result = self.get_success( + self.sliding_sync_handler.get_room_sync_data( + sync_config, + room_id=room_id1, + room_sync_config=room_sync_config, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + from_token=from_token, + to_token=to_token, + ) + ) + + self.assertFalse(result.initial) + self.assertTrue(result.limited) + self.assertEqual( + [e.event_id for e in result.timeline_events], expected_timeline + ) + + def test_room_sync_data_incremental_never(self) -> None: + """Test getting room data where we have not previously sent down the room, + so its state is considered NEVER. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + sync_config, room_sync_config, room_membership_for_user_at_to_token = ( + self._create_sync_configs(user1_id, room_id1) + ) + + # We expect to see these messages even though they're before the + # `from_token`, as the room has not been sent down. + expected_timeline = [] + for _ in range(2): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + # Create a new `from_token`. + connection_token = self.get_success( + self.sliding_sync_handler.connection_store.record_rooms( + sync_config, None, sent_room_ids=[], unsent_room_ids=[] + ) + ) + from_token = SlidingSyncStreamToken( + self.event_sources.get_current_token(), connection_token + ) + + # We should also receive new messages + for _ in range(3): + r = self.helper.send(room_id1, "message", tok=user1_tok) + expected_timeline.append(r["event_id"]) + + to_token = self.event_sources.get_current_token() + + result = self.get_success( + self.sliding_sync_handler.get_room_sync_data( + sync_config, + room_id=room_id1, + room_sync_config=room_sync_config, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + from_token=from_token, + to_token=to_token, + ) + ) + + self.assertTrue(result.initial) + self.assertTrue(result.limited) + self.assertEqual( + [e.event_id for e in result.timeline_events], expected_timeline + ) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 2a27571929..130c225176 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -21,7 +21,7 @@ import json import logging from http import HTTPStatus -from typing import Any, Dict, Iterable, List +from typing import Any, Dict, Iterable, List, Optional, Tuple from parameterized import parameterized, parameterized_class @@ -66,6 +66,7 @@ from tests.federation.transport.test_knocking import ( ) from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import mark_event_as_partial_state +from tests.unittest import skip_unless logger = logging.getLogger(__name__) @@ -1120,12 +1121,11 @@ class DeviceUnusedFallbackKeySyncTestCase(unittest.HomeserverTestCase): self.assertEqual(res, []) # Upload a fallback key for the user/device - fallback_key = {"alg1:k1": 
"fallback_key1"} self.get_success( self.e2e_keys_handler.upload_keys_for_user( alice_user_id, test_device_id, - {"fallback_keys": fallback_key}, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, ) ) # We should now have an unused alg1 key @@ -1232,7 +1232,43 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase): self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"]) -class SlidingSyncTestCase(unittest.HomeserverTestCase): +class SlidingSyncBase(unittest.HomeserverTestCase): + """Base class for sliding sync test cases""" + + sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + + def do_sync( + self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str + ) -> Tuple[JsonDict, str]: + """Do a sliding sync request with given body. + + Asserts the request was successful. + + Attributes: + sync_body: The full request body to use + since: Optional since token + tok: Access token to use + + Returns: + A tuple of the response body and the `pos` field. + """ + + sync_path = self.sync_endpoint + if since: + sync_path += f"?pos={since}" + + channel = self.make_request( + method="POST", + path=sync_path, + content=sync_body, + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + return channel.json_body, channel.json_body["pos"] + + +class SlidingSyncTestCase(SlidingSyncBase): """ Tests regarding MSC3575 Sliding Sync `/sync` endpoint. """ @@ -1253,12 +1289,10 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.sync_endpoint = ( - "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" - ) - self.store = hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() def _assertRequiredStateIncludes( self, @@ -1384,6 +1418,52 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): return room_id + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. + """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. 
+ self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + def test_sync_list(self) -> None: """ Test that room IDs show up in the Sliding Sync `lists` @@ -1489,6 +1569,123 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): # with because we weren't able to find anything new yet. self.assertEqual(channel.json_body["pos"], future_position_token_serialized) + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. + + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the room with new events to trigger new results + event_response1 = self.helper.send( + room_id, "new activity in room", tok=user1_tok + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check to make sure the new event is returned + self.assertEqual( + [ + event["event_id"] + for event in channel.json_body["rooms"][room_id]["timeline"] + ], + [ + event_response1["event_id"], + ], + channel.json_body["rooms"][room_id]["timeline"], + ) + + # TODO: Once we remove `ops`, we should be able to add a `RoomResult.__bool__` to + # check if there are any updates since the `from_token`. + @skip_unless( + False, + "Once we remove ops from the Sliding Sync response, this test should pass", + ) + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + doesn't trigger a false-positive for new data. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + from_token = self.event_sources.get_current_token() + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + + "?timeout=10000" + + f"&pos={self.get_success(from_token.to_string(self.store))}", + { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + } + }, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + # We still see rooms because that's how Sliding Sync lists work but we reached + # the timeout before seeing them + self.assertEqual( + [event["event_id"] for event in channel.json_body["rooms"].keys()], + [room_id], + ) + def test_filter_list(self) -> None: """ Test that filters apply to `lists` @@ -1515,11 +1712,11 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id, user1_id, tok=user1_tok) # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) # Make the Sliding Sync request @@ -2612,22 +2809,20 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id1, "activity before token2", tok=user2_tok ) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } + # The `timeline_limit` is set to 4 so we can at least see one historical event + # before the `from_token`. We should see historical events because this is a + # `newly_joined` room. + timeline_limit = 4 + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": timeline_limit, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) # Join the room after the `from_token` which will make us consider this room as # `newly_joined`. @@ -2642,23 +2837,11 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id1, "activity after token4", tok=user2_tok ) - # The `timeline_limit` is set to 4 so we can at least see one historical event - # before the `from_token`. We should see historical events because this is a - # `newly_joined` room. 
- timeline_limit = 4 # Make an incremental Sliding Sync request (what we're trying to test) channel = self.make_request( "POST", self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": timeline_limit, - } - } - }, + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -2835,22 +3018,16 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity after invite3", tok=user2_tok) self.helper.send(room_id1, "activity after invite4", tok=user2_tok) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 3, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) self.helper.send(room_id1, "activity after token5", tok=user2_tok) self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) @@ -2859,15 +3036,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 3, - } - } - }, + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3106,22 +3275,17 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity after invite3", tok=user2_tok) self.helper.send(room_id1, "activity after invite4", tok=user2_tok) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # Large enough to see the latest events and before the invite + "timeline_limit": 4, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) self.helper.send(room_id1, "activity after token5", tok=user2_tok) self.helper.send(room_id1, "activity after toekn6", tok=user2_tok) @@ -3130,16 +3294,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - # Large enough to see the latest events and before the invite - "timeline_limit": 4, - } - } - }, + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3285,22 +3440,16 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity before2", tok=user2_tok) self.helper.join(room_id1, user1_id, tok=user1_tok) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 4, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = 
channel.json_body["pos"] + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) event_response3 = self.helper.send(room_id1, "activity after3", tok=user2_tok) event_response4 = self.helper.send(room_id1, "activity after4", tok=user2_tok) @@ -3317,15 +3466,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } - } - }, + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3376,22 +3517,16 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.helper.send(room_id1, "activity after3", tok=user2_tok) - channel = self.make_request( - "POST", - self.sync_endpoint, - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 4, } - }, - access_token=user1_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) self.helper.send(room_id1, "activity after4", tok=user2_tok) @@ -3399,15 +3534,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, - } - } - }, + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -3525,6 +3652,52 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id1, user1_id, tok=user1_tok) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.RoomHistoryVisibility, ""], + # This one doesn't exist in the room + [EventTypes.Tombstone, ""], + ], + "timeline_limit": 0, + } + } + } + _, after_room_token = self.do_sync(sync_body, tok=user1_tok) + + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?pos={after_room_token}", + content=sync_body, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # We only return updates but only if we've sent the room down the + # connection before. + self.assertIsNone(channel.json_body["rooms"][room_id1].get("required_state")) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_incremental_sync_restart(self) -> None: + """ + Test `rooms.required_state` returns requested state events in the room during an + incremental sync, after a restart (and so the in memory caches are reset). 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + channel = self.make_request( "POST", self.sync_endpoint, @@ -3542,6 +3715,9 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, channel.json_body) after_room_token = channel.json_body["pos"] + # Reset the in-memory cache + self.hs.get_sliding_sync_handler().connection_store._connections.clear() + # Make the Sliding Sync request channel = self.make_request( "POST", @@ -3564,9 +3740,20 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200, channel.json_body) - # We only return updates but only if we've sent the room down the - # connection before. - self.assertNotIn(room_id1, channel.json_body["rooms"]) + # If the cache has been cleared then we do expect the state to come down + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + channel.json_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.RoomHistoryVisibility, "")], + }, + exact=True, + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) def test_rooms_required_state_wildcard(self) -> None: """ @@ -3792,18 +3979,44 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) - @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) - def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: + def test_rooms_required_state_me(self) -> None: """ - Test `rooms.required_state` should not return state past a leave/ban event. + Test `rooms.required_state` correctly handles $ME. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") user2_id = self.register_user("user2", "pass") user2_tok = self.login(user2_id, "pass") - user3_id = self.register_user("user3", "pass") - user3_tok = self.login(user3_id, "pass") + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + + # Also send normal state events with state keys of the users, first + # change the power levels to allow this. + self.helper.send_state( + room_id1, + event_type=EventTypes.PowerLevels, + body={"users": {user1_id: 50, user2_id: 100}}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo", + state_key=user1_id, + body={}, + tok=user1_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.foo", + state_key=user2_id, + body={}, + tok=user2_tok, + ) + + # Make the Sliding Sync request with a request for '$ME'. 
channel = self.make_request( "POST", self.sync_endpoint, @@ -3811,15 +4024,61 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): "lists": { "foo-list": { "ranges": [[0, 1]], - "required_state": [], - "timeline_limit": 4, + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.ME], + ["org.matrix.foo", StateValues.ME], + ], + "timeline_limit": 3, } } }, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) - from_token = channel.json_body["pos"] + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user3 sent events in the 3 events we see in the `timeline` + self._assertRequiredStateIncludes( + channel.json_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + state_map[(EventTypes.Member, user1_id)], + state_map[("org.matrix.foo", user1_id)], + }, + exact=True, + ) + self.assertIsNone(channel.json_body["rooms"][room_id1].get("invite_state")) + + @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) + def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: + """ + Test `rooms.required_state` should not return state past a leave/ban event. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 3, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id1, user1_id, tok=user1_tok) @@ -3858,19 +4117,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): channel = self.make_request( "POST", self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "foo-list": { - "ranges": [[0, 1]], - "required_state": [ - [EventTypes.Create, ""], - [EventTypes.Member, "*"], - ["org.matrix.foo_state", ""], - ], - "timeline_limit": 3, - } - } - }, + content=sync_body, access_token=user1_tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -4407,7 +4654,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) def test_incremental_sync_full_state_new_room(self) -> None: - """Test that we get state all state in incremental sync for rooms that + """Test that we get all state in incremental sync for rooms that we haven't seen before. 
""" @@ -4501,7 +4748,7 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase): ) -class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): +class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase): """Tests for the to-device sliding sync extension""" servlets = [ @@ -4519,10 +4766,59 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() self.sync_endpoint = ( "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" ) + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. + """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. + self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + def _assert_to_device_response( self, channel: FakeChannel, expected_messages: List[JsonDict] ) -> str: @@ -4686,3 +4982,601 @@ class SlidingSyncToDeviceExtensionTestCase(unittest.HomeserverTestCase): access_token=user1_tok, ) self._assert_to_device_response(channel, []) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", "d1") + user2_id = self.register_user("u2", "pass") + user2_tok = self.login(user2_id, "pass", "d2") + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the to-device messages to trigger new results + test_msg = {"foo": "bar"} + send_to_device_channel = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user1_id: {"d1": test_msg}}}, + access_token=user2_tok, + ) + self.assertEqual( + send_to_device_channel.code, 200, send_to_device_channel.result + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response( + channel, + [{"content": test_msg, "sender": user2_id, "type": "m.test"}], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the To-Device extension doesn't trigger a false-positive for new data. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "to_device": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. 
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + self._assert_to_device_response(channel, []) + + +class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase): + """Tests for the e2ee sliding sync extension""" + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + devices.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Enable sliding sync + config["experimental_features"] = {"msc3575_enabled": True} + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.account_data_handler = hs.get_account_data_handler() + self.notifier = hs.get_notifier() + self.sync_endpoint = ( + "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + ) + + def _bump_notifier_wait_for_events(self, user_id: str) -> None: + """ + Wake-up a `notifier.wait_for_events(user_id)` call without affecting the Sliding + Sync results. + """ + # We're expecting some new activity from this point onwards + from_token = self.event_sources.get_current_token() + + triggered_notifier_wait_for_events = False + + async def _on_new_acivity( + before_token: StreamToken, after_token: StreamToken + ) -> bool: + nonlocal triggered_notifier_wait_for_events + triggered_notifier_wait_for_events = True + return True + + # Listen for some new activity for the user. We're just trying to confirm that + # our bump below actually does what we think it does (triggers new activity for + # the user). + result_awaitable = self.notifier.wait_for_events( + user_id, + 1000, + _on_new_acivity, + from_token=from_token, + ) + + # Update the account data so that `notifier.wait_for_events(...)` wakes up. + # We're bumping account data because it won't show up in the Sliding Sync + # response so it won't affect whether we have results. 
+ self.get_success( + self.account_data_handler.add_account_data_for_user( + user_id, + "org.matrix.foobarbaz", + {"foo": "bar"}, + ) + ) + + # Wait for our notifier result + self.get_success(result_awaitable) + + if not triggered_notifier_wait_for_events: + raise AssertionError( + "Expected `notifier.wait_for_events(...)` to be triggered" + ) + + def test_no_data_initial_sync(self) -> None: + """ + Test that enabling e2ee extension works during an intitial sync, even if there + is no-data + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Make an initial Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device list updates are only present for incremental syncs + self.assertIsNone(channel.json_body["extensions"]["e2ee"].get("device_lists")) + + # Both of these should be present even when empty + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + { + # This is always present because of + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0 + }, + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + [], + ) + + def test_no_data_incremental_sync(self) -> None: + """ + Test that enabling e2ee extension works during an incremental sync, even if + there is no-data + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make an incremental Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device list shows up for incremental syncs + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + # Both of these should be present even when empty + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + { + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0 + }, + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + [], + ) + + def test_wait_for_new_data(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive. 
+ + (Only applies to incremental syncs with a `timeout` specified) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + test_device_id = "TESTDEVICE" + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass", device_id=test_device_id) + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + "?timeout=10000" + f"&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Bump the device lists to trigger new results + # Have user3 update their device list + device_update_channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=user3_tok, + ) + self.assertEqual( + device_update_channel.code, 200, device_update_channel.json_body + ) + # Should respond before the 10 second timeout + channel.await_result(timeout_ms=3000) + self.assertEqual(channel.code, 200, channel.json_body) + + # We should see the device list update + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [user3_id], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + def test_wait_for_new_data_timeout(self) -> None: + """ + Test to make sure that the Sliding Sync request waits for new data to arrive but + no data ever arrives so we timeout. We're also making sure that the default data + from the E2EE extension doesn't trigger a false-positive for new data (see + `device_one_time_keys_count.signed_curve25519`). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make the Sliding Sync request + channel = self.make_request( + "POST", + self.sync_endpoint + f"?timeout=10000&pos={from_token}", + content=sync_body, + access_token=user1_tok, + await_result=False, + ) + # Block for 5 seconds to make sure we are `notifier.wait_for_events(...)` + with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=5000) + # Wake-up `notifier.wait_for_events(...)` that will cause us test + # `SlidingSyncResult.__bool__` for new results. + self._bump_notifier_wait_for_events(user1_id) + # Block for a little bit more to ensure we don't see any new results. 
+ with self.assertRaises(TimedOutException): + channel.await_result(timeout_ms=4000) + # Wait for the sync to complete (wait for the rest of the 10 second timeout, + # 5000 + 4000 + 1200 > 10000) + channel.await_result(timeout_ms=1200) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device lists are present for incremental syncs but empty because no device changes + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [], + ) + + # Both of these should be present even when empty + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_one_time_keys_count"], + { + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0 + }, + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"]["device_unused_fallback_key_types"], + [], + ) + + def test_device_lists(self) -> None: + """ + Test that device list updates are included in the response + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + test_device_id = "TESTDEVICE" + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass", device_id=test_device_id) + + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.join(room_id, user3_id, tok=user3_tok) + self.helper.join(room_id, user4_id, tok=user4_tok) + + sync_body = { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Have user3 update their device list + channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=user3_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # User4 leaves the room + self.helper.leave(room_id, user4_id, tok=user4_tok) + + # Make an incremental Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint + f"?pos={from_token}", + content=sync_body, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Device list updates show up + self.assertEqual( + channel.json_body["extensions"]["e2ee"] + .get("device_lists", {}) + .get("changed"), + [user3_id], + ) + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_lists", {}).get("left"), + [user4_id], + ) + + def test_device_one_time_keys_count(self) -> None: + """ + Test that `device_one_time_keys_count` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # Upload one time keys for the user/device + keys: JsonDict = { + "alg1:k1": "key1", + "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, + "alg2:k3": {"key": "key3"}, + } + 
upload_keys_response = self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, test_device_id, {"one_time_keys": keys} + ) + ) + self.assertDictEqual( + upload_keys_response, + { + "one_time_key_counts": { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + } + }, + ) + + # Make a Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check for those one time key counts + self.assertEqual( + channel.json_body["extensions"]["e2ee"].get("device_one_time_keys_count"), + { + "alg1": 1, + "alg2": 2, + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + "signed_curve25519": 0, + }, + ) + + def test_device_unused_fallback_key_types(self) -> None: + """ + Test that `device_unused_fallback_key_types` are included in the response + """ + test_device_id = "TESTDEVICE" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass", device_id=test_device_id) + + # We shouldn't have any unused fallback keys yet + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(res, []) + + # Upload a fallback key for the user/device + self.get_success( + self.e2e_keys_handler.upload_keys_for_user( + user1_id, + test_device_id, + {"fallback_keys": {"alg1:k1": "fallback_key1"}}, + ) + ) + # We should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user1_id, test_device_id) + ) + self.assertEqual(fallback_res, ["alg1"], fallback_res) + + # Make a Sliding Sync request with the e2ee extension enabled + channel = self.make_request( + "POST", + self.sync_endpoint, + { + "lists": {}, + "extensions": { + "e2ee": { + "enabled": True, + } + }, + }, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check for the unused fallback key types + self.assertListEqual( + channel.json_body["extensions"]["e2ee"].get( + "device_unused_fallback_key_types" + ), + ["alg1"], + ) diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py index 5d38718a50..af1199ef8a 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py @@ -249,5 +249,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase): self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3) self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4) - # Unknown entities will return the stream start position. 
- self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1) + # Unknown entities will return None + self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None) |