-rw-r--r--  flake.lock | 62
-rwxr-xr-x  flake.nix | 6
-rw-r--r--  host/Rory-desktop/configuration.nix | 1
-rw-r--r--  host/Rory-desktop/edu/mongodb.nix | 20
-rw-r--r--  host/Rory-nginx/services/matrix/synapse/workers/event-creator.nix | 1
-rw-r--r--  packages/overlays/matrix-synapse/patches/0001-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch (renamed from packages/overlays/matrix-synapse/patches/0045-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch | 98
-rw-r--r--  packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch | 80
-rw-r--r--  packages/overlays/matrix-synapse/patches/0002-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch (renamed from packages/overlays/matrix-synapse/patches/0046-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch | 26
-rw-r--r--  packages/overlays/matrix-synapse/patches/0003-Remove-newline-from-final-bullet-point-of-PR-templat.patch (renamed from packages/overlays/matrix-synapse/patches/0047-Remove-newline-from-final-bullet-point-of-PR-templat.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch | 27
-rw-r--r--  packages/overlays/matrix-synapse/patches/0004-Explicitly-enable-pypy-for-cibuildwheel-18417.patch (renamed from packages/overlays/matrix-synapse/patches/0048-Explicitly-enable-pypy-for-cibuildwheel-18417.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch | 26
-rw-r--r--  packages/overlays/matrix-synapse/patches/0005-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch (renamed from packages/overlays/matrix-synapse/patches/0049-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch | 25
-rw-r--r--  packages/overlays/matrix-synapse/patches/0006-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch (renamed from packages/overlays/matrix-synapse/patches/0050-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch | 28
-rw-r--r--  packages/overlays/matrix-synapse/patches/0007-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch (renamed from packages/overlays/matrix-synapse/patches/0051-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0008-Add-option-to-allow-registrations-that-begin-with-_-.patch (renamed from packages/overlays/matrix-synapse/patches/0052-Add-option-to-allow-registrations-that-begin-with-_-.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch | 28
-rw-r--r--  packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch | 92
-rw-r--r--  packages/overlays/matrix-synapse/patches/0009-remove-room-without-listeners-from-Notifier.room_to_.patch (renamed from packages/overlays/matrix-synapse/patches/0054-remove-room-without-listeners-from-Notifier.room_to_.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch | 39
-rw-r--r--  packages/overlays/matrix-synapse/patches/0010-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch (renamed from packages/overlays/matrix-synapse/patches/0055-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0011-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch (renamed from packages/overlays/matrix-synapse/patches/0056-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch | 35
-rw-r--r--  packages/overlays/matrix-synapse/patches/0012-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch (renamed from packages/overlays/matrix-synapse/patches/0057-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch | 25
-rw-r--r--  packages/overlays/matrix-synapse/patches/0013-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch (renamed from packages/overlays/matrix-synapse/patches/0058-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch | 128
-rw-r--r--  packages/overlays/matrix-synapse/patches/0014-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch (renamed from packages/overlays/matrix-synapse/patches/0059-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch | 123
-rw-r--r--  packages/overlays/matrix-synapse/patches/0015-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch (renamed from packages/overlays/matrix-synapse/patches/0060-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch | 116
-rw-r--r--  packages/overlays/matrix-synapse/patches/0016-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch (renamed from packages/overlays/matrix-synapse/patches/0061-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch | 34
-rw-r--r--  packages/overlays/matrix-synapse/patches/0017-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch (renamed from packages/overlays/matrix-synapse/patches/0062-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch | 138
-rw-r--r--  packages/overlays/matrix-synapse/patches/0018-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch (renamed from packages/overlays/matrix-synapse/patches/0064-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch | 73
-rw-r--r--  packages/overlays/matrix-synapse/patches/0019-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch (renamed from packages/overlays/matrix-synapse/patches/0065-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch | 100
-rw-r--r--  packages/overlays/matrix-synapse/patches/0020-Update-postgres.md-18445.patch (renamed from packages/overlays/matrix-synapse/patches/0066-Update-postgres.md-18445.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch | 91
-rw-r--r--  packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch | 67
-rw-r--r--  packages/overlays/matrix-synapse/patches/0021-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch (renamed from packages/overlays/matrix-synapse/patches/0067-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0022-Add-a-unit-test-for-the-phone-home-stats-18463.patch (renamed from packages/overlays/matrix-synapse/patches/0070-Add-a-unit-test-for-the-phone-home-stats-18463.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch | 69
-rw-r--r--  packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch | 383
-rw-r--r--  packages/overlays/matrix-synapse/patches/0023-Include-room-ID-in-room-deletion-status-response-183.patch (renamed from packages/overlays/matrix-synapse/patches/0071-Include-room-ID-in-room-deletion-status-response-183.patch) | 2
-rw-r--r--  packages/overlays/matrix-synapse/patches/0024-Policy-server-part-1-Actually-call-the-policy-server.patch | 666
-rw-r--r--  packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch | 48
-rw-r--r--  packages/overlays/matrix-synapse/patches/0025-Bump-pyo3-from-0.23.5-to-0.24.2-18460.patch | 166
-rw-r--r--  packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch | 129
-rw-r--r--  packages/overlays/matrix-synapse/patches/0026-Bump-Tornado-from-6.4.2-to-6.5.0-18459.patch | 93
-rw-r--r--  packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch | 34
-rw-r--r--  packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch | 177
-rw-r--r--  packages/overlays/matrix-synapse/patches/0027-Don-t-move-invited-users-to-new-room-when-shutting-d.patch | 118
-rw-r--r--  packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch | 37
-rw-r--r--  packages/overlays/matrix-synapse/patches/0028-fix-device-handler-make-_maybe_retry_device_resync-t.patch | 94
-rw-r--r--  packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch | 54
-rw-r--r--  packages/overlays/matrix-synapse/patches/0029-Hotfix-ignore-rejected-events-in-delayed_events.patch (renamed from packages/overlays/matrix-synapse/patches/0072-Hotfix-ignore-rejected-events-in-delayed_events.patch) | 4
-rw-r--r--  packages/overlays/matrix-synapse/patches/0030-Add-too-much-logging-to-room-summary-over-federation.patch (renamed from packages/overlays/matrix-synapse/patches/0073-Add-too-much-logging-to-room-summary-over-federation.patch) | 4
-rw-r--r--  packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch | 699
-rw-r--r--  packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch | 247
-rw-r--r--  packages/overlays/matrix-synapse/patches/0031-Log-entire-room-if-accessibility-check-fails.patch (renamed from packages/overlays/matrix-synapse/patches/0074-Log-entire-room-if-accessibility-check-fails.patch) | 4
-rw-r--r--  packages/overlays/matrix-synapse/patches/0032-Log-policy-server-rejected-events.patch | 30
-rw-r--r--  packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch | 118
-rw-r--r--  packages/overlays/matrix-synapse/patches/0033-Use-parse_boolean-for-unredacted-content.patch | 29
-rw-r--r--  packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch | 87
-rw-r--r--  packages/overlays/matrix-synapse/patches/0034-Expose-tombstone-in-room-admin-api.patch | 139
-rw-r--r--  packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch | 2816
-rw-r--r--  packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch | 537
-rw-r--r--  packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch | 28
-rw-r--r--  packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch | 35
-rw-r--r--  packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch | 34
-rw-r--r--  packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch | 279
-rw-r--r--  packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch | 54
-rw-r--r--  packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch | 191
-rw-r--r--  packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch | 351
-rw-r--r--  packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch | 43
-rw-r--r--  packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch | 63
-rw-r--r--  packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch | 342
-rw-r--r--  packages/overlays/matrix-synapse/patches/0068-1.130.0.patch | 77
-rw-r--r--  packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch | 25
86 files changed, 1419 insertions, 8420 deletions
diff --git a/flake.lock b/flake.lock
index 90aeaca..5a8ddad 100644
--- a/flake.lock
+++ b/flake.lock
@@ -225,11 +225,11 @@
     "draupnirSrc": {
       "flake": false,
       "locked": {
-        "lastModified": 1747652125,
-        "narHash": "sha256-eUAOWxlloj5mUlosNyP3iKXpohrUE1+d9M3JM4zJuyU=",
+        "lastModified": 1748199505,
+        "narHash": "sha256-VsY0U93poNz3HbVMGBKx3LTsLceA5koE6L3N0ximjOY=",
         "owner": "the-draupnir-project",
         "repo": "Draupnir",
-        "rev": "8ccbe86d199319894276f447d292d810853f9bd5",
+        "rev": "120111f075660a549d8610dab548cc930bdc26a7",
         "type": "github"
       },
       "original": {
@@ -569,11 +569,11 @@
         "nixpkgs": "nixpkgs_6"
       },
       "locked": {
-        "lastModified": 1747747328,
-        "narHash": "sha256-3Gc5CqAJqpvI4gIU1Oxbl5w440b+rY9HvDzs5C0ChBA=",
+        "lastModified": 1748227609,
+        "narHash": "sha256-SaSdslyo6UGDpPUlmrPA4dWOEuxCy2ihRN9K6BnqYsA=",
         "owner": "nix-community",
         "repo": "home-manager",
-        "rev": "65d2282ff6cf560f54997013bd1e575fbd0a7ebf",
+        "rev": "d23d20f55d49d8818ac1f1b2783671e8a6725022",
         "type": "github"
       },
       "original": {
@@ -904,11 +904,11 @@
     "matrixSpecSrc": {
       "flake": false,
       "locked": {
-        "lastModified": 1747148958,
-        "narHash": "sha256-HCk1dTvaDY+W2tg3uP0feSeZfcOaWyTklwZFc2E8zUU=",
+        "lastModified": 1747842182,
+        "narHash": "sha256-ma8ShKCKqm9NkxZ/iAF5jSN+Avir23gY1fh+lZVBNNs=",
         "owner": "matrix-org",
         "repo": "matrix-spec",
-        "rev": "67743d5715071afa4a2b8553321dccf5339a330d",
+        "rev": "2c734c3c5b1aad5f949302e3ca9c86dc3a2c05e6",
         "type": "github"
       },
       "original": {
@@ -921,11 +921,11 @@
     "mtxclientSrc": {
       "flake": false,
       "locked": {
-        "lastModified": 1747613595,
-        "narHash": "sha256-NthiRLkYoTXM65tndD8cDD1L+zbywzmGbyqwsa/KYu8=",
+        "lastModified": 1748174505,
+        "narHash": "sha256-0ASLFpWZ0Gd0bVg45+LCs9IcSOrnxD2fx7Vi7RJSLOU=",
         "owner": "Nheko-reborn",
         "repo": "mtxclient",
-        "rev": "a9578b8a697074b9c86118ca75b34bdb01ba82d2",
+        "rev": "8e1c3814542b3b4088c1ffa6c88ec8583c928fc5",
         "type": "github"
       },
       "original": {
@@ -938,11 +938,11 @@
     "nhekoSrc": {
       "flake": false,
       "locked": {
-        "lastModified": 1746136083,
-        "narHash": "sha256-4K8+482xIfIWn0n3i0LjtSTii4bH+YLfMLuGIwtvDbA=",
+        "lastModified": 1748173079,
+        "narHash": "sha256-uI5tBldTT7NrhX/+t9/R/4sGlHG/3EjBryeZpojgxTc=",
         "owner": "Nheko-reborn",
         "repo": "nheko",
-        "rev": "ad19bf3a308de121a832562ade8e7b470d1f809a",
+        "rev": "978174af774f99bb70df2ad5307ae161be6190ff",
         "type": "github"
       },
       "original": {
@@ -1077,11 +1077,11 @@
     },
     "nixpkgs-RoryNix": {
       "locked": {
-        "lastModified": 1747748004,
-        "narHash": "sha256-3S4SJNbJro2pFVXxROvyu8fb9yOTDzSaWfSOb50rHZw=",
+        "lastModified": 1748302001,
+        "narHash": "sha256-VRxJOOI5A0NN2RgOjOGVkd/+MuzQbKpMKs5yoVwR1Io=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "8db79028d5614a0d3f5ce020d6b7cd7ff597e32d",
+        "rev": "d9dd002a294e676c49e27eae9166f73bd2ae5684",
         "type": "github"
       },
       "original": {
@@ -1093,11 +1093,11 @@
     },
     "nixpkgs-master": {
       "locked": {
-        "lastModified": 1747748004,
-        "narHash": "sha256-3S4SJNbJro2pFVXxROvyu8fb9yOTDzSaWfSOb50rHZw=",
+        "lastModified": 1748302001,
+        "narHash": "sha256-VRxJOOI5A0NN2RgOjOGVkd/+MuzQbKpMKs5yoVwR1Io=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "8db79028d5614a0d3f5ce020d6b7cd7ff597e32d",
+        "rev": "d9dd002a294e676c49e27eae9166f73bd2ae5684",
         "type": "github"
       },
       "original": {
@@ -1141,11 +1141,11 @@
     },
     "nixpkgs-stable_3": {
       "locked": {
-        "lastModified": 1747485343,
-        "narHash": "sha256-YbsZyuRE1tobO9sv0PUwg81QryYo3L1F3R3rF9bcG38=",
+        "lastModified": 1748037224,
+        "narHash": "sha256-92vihpZr6dwEMV6g98M5kHZIttrWahb9iRPBm1atcPk=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "9b5ac7ad45298d58640540d0323ca217f32a6762",
+        "rev": "f09dede81861f3a83f7f06641ead34f02f37597f",
         "type": "github"
       },
       "original": {
@@ -1237,11 +1237,11 @@
     },
     "nixpkgs_6": {
       "locked": {
-        "lastModified": 1747327360,
-        "narHash": "sha256-LSmTbiq/nqZR9B2t4MRnWG7cb0KVNU70dB7RT4+wYK4=",
+        "lastModified": 1748026106,
+        "narHash": "sha256-6m1Y3/4pVw1RWTsrkAK2VMYSzG4MMIj7sqUy7o8th1o=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "e06158e58f3adee28b139e9c2bcfcc41f8625b46",
+        "rev": "063f43f2dbdef86376cc29ad646c45c46e93234c",
         "type": "github"
       },
       "original": {
@@ -1269,11 +1269,11 @@
     },
     "nixpkgs_8": {
       "locked": {
-        "lastModified": 1747542820,
-        "narHash": "sha256-GaOZntlJ6gPPbbkTLjbd8BMWaDYafhuuYRNrxCGnPJw=",
+        "lastModified": 1748190013,
+        "narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "292fa7d4f6519c074f0a50394dbbe69859bb6043",
+        "rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
         "type": "github"
       },
       "original": {
@@ -1305,7 +1305,7 @@
       },
       "locked": {
         "lastModified": 1737779835,
-        "narHash": "sha256-iZ/kQ/XFqIx053AuSHhCwu3HA8627ognYiJl/LRNpD0=",
+        "narHash": "sha256-TY7cnYqhgxIXZCltcFxYuKQ6Hpt3gouuYn0rj9URsp4=",
         "ref": "refs/heads/master",
         "rev": "11cc65efa2909bdc7e3e978bf1f56f6d141bf82a",
         "revCount": 11,
diff --git a/flake.nix b/flake.nix
index 3d3c070..5329dee 100755
--- a/flake.nix
+++ b/flake.nix
@@ -157,7 +157,7 @@
       nixpkgs.overlays = [
         (final: prev: {
           matrix-synapse-unwrapped = inputs.nixpkgs-master.legacyPackages.${pkgs.stdenv.hostPlatform.system}.matrix-synapse-unwrapped.overrideAttrs (oldAttrs: rec {
-            patches = oldAttrs.patches ++ lib.map (
+            patches = (if oldAttrs ? patches then oldAttrs.patches else []) ++ lib.map (
               path: ./packages/overlays/matrix-synapse/patches/${path}
             ) (builtins.attrNames (builtins.readDir ./packages/overlays/matrix-synapse/patches));
@@ -165,9 +165,7 @@
               inherit (oldAttrs) src;
               inherit patches;
               name = "${oldAttrs.pname}-${oldAttrs.version}";
-              #hash = "sha256-PdAyEGLYmMLgcPQjzjuwvQo55olKgr079gsgQnUoKTM=";
-              hash = "sha256-leYnFxIlNnpjaIbfVRphFVpqWmueTkvHeNU8sFFxUeI=";
-              #hash = "";
+              hash = "sha256-9VJnn8aPkShqK2wYGFr+S5koIjma7VOr+LkLXwStL1E=";
             };
           });
           #draupnir = inputs.nixpkgs-master.legacyPackages.${pkgs.stdenv.hostPlatform.system}.draupnir;
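
Two details in the flake.nix hunk above are easy to miss. `oldAttrs ? patches` is an attribute-presence test, so the override keeps evaluating even if the upstream matrix-synapse-unwrapped derivation stops defining `patches`. The patch list itself is discovered by listing the patches directory: `builtins.attrNames` returns names in lexicographic order, so the NNNN- filename prefixes (renumbered throughout the rest of this commit) determine the order in which the patches are applied. A minimal sketch of that pattern in isolation follows; the `patchDir` binding and the plain `map` call are illustrative, not copied from this flake.

final: prev:
let
  # Directory of NNNN-*.patch files; attrNames sorts the names, so the numeric
  # prefix is what keeps the series applying in the intended order.
  patchDir = ./packages/overlays/matrix-synapse/patches;
in
{
  matrix-synapse-unwrapped = prev.matrix-synapse-unwrapped.overrideAttrs (oldAttrs: {
    # Fall back to [ ] when upstream defines no patches attribute at all.
    patches =
      (if oldAttrs ? patches then oldAttrs.patches else [ ])
      ++ map (name: patchDir + "/${name}") (builtins.attrNames (builtins.readDir patchDir));
  });
}
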
diff --git a/host/Rory-desktop/configuration.nix b/host/Rory-desktop/configuration.nix
index d93b66d..e16cb7e 100644
--- a/host/Rory-desktop/configuration.nix
+++ b/host/Rory-desktop/configuration.nix
@@ -26,6 +26,7 @@ args@{
 
     #./optional/gui/x11.nix
     ./optional/gui/wayland.nix
+    ./edu/mongodb.nix
 
     # ./printing.nix
     # ./ollama.nix
diff --git a/host/Rory-desktop/edu/mongodb.nix b/host/Rory-desktop/edu/mongodb.nix
new file mode 100644
index 0000000..92ffc6a
--- /dev/null
+++ b/host/Rory-desktop/edu/mongodb.nix
@@ -0,0 +1,20 @@
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
+
+{
+  services.mongodb = {
+    enable = true;
+    package = pkgs.mongodb-ce;
+    enableAuth = true;
+    initialRootPasswordFile = "/etc/mongo-pass";
+    #bind_ip = "/run/mongodb.sock";
+    extraConfig = ''
+      net.unixDomainSocket.filePermissions: 0777
+    '';
+  };
+
+}
diff --git a/host/Rory-nginx/services/matrix/synapse/workers/event-creator.nix b/host/Rory-nginx/services/matrix/synapse/workers/event-creator.nix
index e6d81ce..1c08d4b 100644
--- a/host/Rory-nginx/services/matrix/synapse/workers/event-creator.nix
+++ b/host/Rory-nginx/services/matrix/synapse/workers/event-creator.nix
@@ -13,6 +13,7 @@ let
     "~ ^/_matrix/client/(api/v1|r0|v3|unstable)/join/"
     "~ ^/_matrix/client/(api/v1|r0|v3|unstable)/knock/"
     "~ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
+    "~ ^/_synapse/admin/v1/rooms" # We have a lot of them, so let's do a bunch of jobs at once!
   ];
   federation = [ ];
   media = [ ];
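
The worker module above only declares matcher lists (client, federation, media); how they become reverse-proxy routes is not part of this diff. As a rough, hypothetical illustration of what "~ ^/..." entries such as the new ^/_synapse/admin/v1/rooms line typically turn into on an nginx front end (the host name, upstream port, and option layout below are invented for the sketch, not taken from this repository):

{ lib, ... }:
let
  # Subset of the client matcher list shown in the diff above.
  clientRoutes = [
    "~ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
    "~ ^/_synapse/admin/v1/rooms"
  ];
  eventCreatorUpstream = "http://127.0.0.1:8083"; # hypothetical worker port
in
{
  # Each regex becomes an nginx `location` block proxying to the worker.
  services.nginx.virtualHosts."matrix.example.com".locations = lib.listToAttrs (
    map (route: lib.nameValuePair route { proxyPass = eventCreatorUpstream; }) clientRoutes
  );
}
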
diff --git a/packages/overlays/matrix-synapse/patches/0045-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch b/packages/overlays/matrix-synapse/patches/0001-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch
index b448370..22757b6 100644
--- a/packages/overlays/matrix-synapse/patches/0045-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch
+++ b/packages/overlays/matrix-synapse/patches/0001-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch
@@ -1,7 +1,7 @@
 From c626d54cea3a99200c162a2578550e56242e8213 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Tue, 13 May 2025 15:12:22 +0100
-Subject: [PATCH 45/74] Bump mypy-zope from 1.0.9 to 1.0.11 (#18428)
+Subject: [PATCH 01/34] Bump mypy-zope from 1.0.9 to 1.0.11 (#18428)
 
 ---
  poetry.lock | 8 ++++----
diff --git a/packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch b/packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch
deleted file mode 100644
index a89c307..0000000 --- a/packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch +++ /dev/null
@@ -1,98 +0,0 @@ -From 33824495ba520f57eae3687db847175b40f71d73 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Fri, 25 Apr 2025 15:08:33 +0100 -Subject: [PATCH 01/74] Move GET /devices/ off main process (#18355) - -We can't move PUT/DELETE as they do need to happen on main process (due -to notification of device changes). - ---------- - -Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> ---- - changelog.d/18355.feature | 1 + - docs/workers.md | 1 + - synapse/rest/client/devices.py | 20 ++++++++++++++++++-- - 3 files changed, 20 insertions(+), 2 deletions(-) - create mode 100644 changelog.d/18355.feature - -diff --git a/changelog.d/18355.feature b/changelog.d/18355.feature -new file mode 100644 -index 0000000000..4813f0a291 ---- /dev/null -+++ b/changelog.d/18355.feature -@@ -0,0 +1 @@ -+Add support for handling `GET /devices/` on workers. -diff --git a/docs/workers.md b/docs/workers.md -index 08ee493da9..def902d24c 100644 ---- a/docs/workers.md -+++ b/docs/workers.md -@@ -280,6 +280,7 @@ Additionally, the following REST endpoints can be handled for GET requests: - - ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ - ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events -+ ^/_matrix/client/(api/v1|r0|v3|unstable)/devices/ - - # Account data requests - ^/_matrix/client/(r0|v3|unstable)/.*/tags -diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py -index 4607b23494..0b075cc2f2 100644 ---- a/synapse/rest/client/devices.py -+++ b/synapse/rest/client/devices.py -@@ -143,11 +143,11 @@ class DeviceRestServlet(RestServlet): - self.hs = hs - self.auth = hs.get_auth() - handler = hs.get_device_handler() -- assert isinstance(handler, DeviceHandler) - self.device_handler = handler - self.auth_handler = hs.get_auth_handler() - self._msc3852_enabled = hs.config.experimental.msc3852_enabled - self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled -+ self._is_main_process = hs.config.worker.worker_app is None - - async def on_GET( - self, request: SynapseRequest, device_id: str -@@ -179,6 +179,14 @@ class DeviceRestServlet(RestServlet): - async def on_DELETE( - self, request: SynapseRequest, device_id: str - ) -> Tuple[int, JsonDict]: -+ # Can only be run on main process, as changes to device lists must -+ # happen on main. -+ if not self._is_main_process: -+ error_message = "DELETE on /devices/ must be routed to main process" -+ logger.error(error_message) -+ raise SynapseError(500, error_message) -+ assert isinstance(self.device_handler, DeviceHandler) -+ - requester = await self.auth.get_user_by_req(request) - - try: -@@ -223,6 +231,14 @@ class DeviceRestServlet(RestServlet): - async def on_PUT( - self, request: SynapseRequest, device_id: str - ) -> Tuple[int, JsonDict]: -+ # Can only be run on main process, as changes to device lists must -+ # happen on main. 
-+ if not self._is_main_process: -+ error_message = "PUT on /devices/ must be routed to main process" -+ logger.error(error_message) -+ raise SynapseError(500, error_message) -+ assert isinstance(self.device_handler, DeviceHandler) -+ - requester = await self.auth.get_user_by_req(request, allow_guest=True) - - body = parse_and_validate_json_object_from_request(request, self.PutBody) -@@ -585,9 +601,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - ): - DeleteDevicesRestServlet(hs).register(http_server) - DevicesRestServlet(hs).register(http_server) -+ DeviceRestServlet(hs).register(http_server) - - if hs.config.worker.worker_app is None: -- DeviceRestServlet(hs).register(http_server) - if hs.config.experimental.msc2697_enabled: - DehydratedDeviceServlet(hs).register(http_server) - ClaimDehydratedDeviceServlet(hs).register(http_server) --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch b/packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch deleted file mode 100644
index 7cb9bd1..0000000 --- a/packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch +++ /dev/null
@@ -1,80 +0,0 @@ -From 5b89c9264380da8f9cc55460f8215758fe570010 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Fri, 25 Apr 2025 15:18:22 +0100 -Subject: [PATCH 02/74] Allow /rooms/ admin API to be on workers (#18360) - -Tested by https://github.com/matrix-org/sytest/pull/1400 ---- - changelog.d/18360.misc | 1 + - docs/workers.md | 1 + - synapse/app/generic_worker.py | 3 ++- - synapse/rest/admin/__init__.py | 5 +++-- - 4 files changed, 7 insertions(+), 3 deletions(-) - create mode 100644 changelog.d/18360.misc - -diff --git a/changelog.d/18360.misc b/changelog.d/18360.misc -new file mode 100644 -index 0000000000..e5bf4f536f ---- /dev/null -+++ b/changelog.d/18360.misc -@@ -0,0 +1 @@ -+Allow `/rooms/` admin API to be run on workers. -diff --git a/docs/workers.md b/docs/workers.md -index def902d24c..9ebcc886b1 100644 ---- a/docs/workers.md -+++ b/docs/workers.md -@@ -249,6 +249,7 @@ information. - ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$ - ^/_matrix/client/(r0|v3|unstable)/capabilities$ - ^/_matrix/client/(r0|v3|unstable)/notifications$ -+ ^/_synapse/admin/v1/rooms/ - - # Encryption requests - ^/_matrix/client/(r0|v3|unstable)/keys/query$ -diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py -index a528c3890d..e4120ed424 100644 ---- a/synapse/app/generic_worker.py -+++ b/synapse/app/generic_worker.py -@@ -52,7 +52,7 @@ from synapse.logging.context import LoggingContext - from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy - from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource - from synapse.rest import ClientRestResource --from synapse.rest.admin import register_servlets_for_media_repo -+from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo - from synapse.rest.health import HealthResource - from synapse.rest.key.v2 import KeyResource - from synapse.rest.synapse.client import build_synapse_client_resource_tree -@@ -190,6 +190,7 @@ class GenericWorkerServer(HomeServer): - - resources.update(build_synapse_client_resource_tree(self)) - resources["/.well-known"] = well_known_resource(self) -+ resources["/_synapse/admin"] = AdminRestResource(self) - - elif name == "federation": - resources[FEDERATION_PREFIX] = TransportLayerServer(self) -diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py -index f3c99663e8..5977ded4a0 100644 ---- a/synapse/rest/admin/__init__.py -+++ b/synapse/rest/admin/__init__.py -@@ -275,7 +275,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - """ - Register all the admin servlets. - """ -- # Admin servlets aren't registered on workers. -+ RoomRestServlet(hs).register(http_server) -+ -+ # Admin servlets below may not work on workers. - if hs.config.worker.worker_app is not None: - return - -@@ -283,7 +285,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - BlockRoomRestServlet(hs).register(http_server) - ListRoomRestServlet(hs).register(http_server) - RoomStateRestServlet(hs).register(http_server) -- RoomRestServlet(hs).register(http_server) - RoomRestV2Servlet(hs).register(http_server) - RoomMembersRestServlet(hs).register(http_server) - DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server) --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0046-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch b/packages/overlays/matrix-synapse/patches/0002-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch
index 744bae9..363c861 100644
--- a/packages/overlays/matrix-synapse/patches/0046-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch
+++ b/packages/overlays/matrix-synapse/patches/0002-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch
@@ -1,7 +1,7 @@
 From ba2f1be891a4dbc2fe55af968dd72a146a8c9068 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Tue, 13 May 2025 15:12:34 +0100
-Subject: [PATCH 46/74] Bump types-requests from 2.32.0.20241016 to
+Subject: [PATCH 02/34] Bump types-requests from 2.32.0.20241016 to
  2.32.0.20250328 (#18427)
 
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch b/packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch
deleted file mode 100644
index 0507a50..0000000 --- a/packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch +++ /dev/null
@@ -1,26 +0,0 @@ -From 1482ad1917ef5e022b2d2238d30be74f50b47953 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:05:43 +0100 -Subject: [PATCH 03/74] Bump sigstore/cosign-installer from 3.8.1 to 3.8.2 - (#18366) - ---- - .github/workflows/docker.yml | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml -index 052dcf800b..c617753c7a 100644 ---- a/.github/workflows/docker.yml -+++ b/.github/workflows/docker.yml -@@ -30,7 +30,7 @@ jobs: - run: docker buildx inspect - - - name: Install Cosign -- uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1 -+ uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2 - - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0047-Remove-newline-from-final-bullet-point-of-PR-templat.patch b/packages/overlays/matrix-synapse/patches/0003-Remove-newline-from-final-bullet-point-of-PR-templat.patch
index 7163a97..2fdf9be 100644
--- a/packages/overlays/matrix-synapse/patches/0047-Remove-newline-from-final-bullet-point-of-PR-templat.patch
+++ b/packages/overlays/matrix-synapse/patches/0003-Remove-newline-from-final-bullet-point-of-PR-templat.patch
@@ -1,7 +1,7 @@
 From 480d4faa38401f37b0b5608356ee1959aa5829c8 Mon Sep 17 00:00:00 2001
 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
 Date: Tue, 13 May 2025 15:14:00 +0100
-Subject: [PATCH 47/74] Remove newline from final bullet point of PR template
+Subject: [PATCH 03/34] Remove newline from final bullet point of PR template
  (#18419)
 
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch b/packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch
deleted file mode 100644
index 14d8061..0000000 --- a/packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch +++ /dev/null
@@ -1,27 +0,0 @@ -From 2ff977a6c39caa24f35c58f2f5acd948dbdf122b Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:05:55 +0100 -Subject: [PATCH 04/74] Bump actions/add-to-project from - 280af8ae1f83a494cfad2cb10f02f6d13529caa9 to - 5b1a254a3546aef88e0a7724a77a623fa2e47c36 (#18365) - ---- - .github/workflows/triage_labelled.yml | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml -index feab5906e0..e506be393f 100644 ---- a/.github/workflows/triage_labelled.yml -+++ b/.github/workflows/triage_labelled.yml -@@ -11,7 +11,7 @@ jobs: - if: > - contains(github.event.issue.labels.*.name, 'X-Needs-Info') - steps: -- - uses: actions/add-to-project@280af8ae1f83a494cfad2cb10f02f6d13529caa9 # main (v1.0.2 + 10 commits) -+ - uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits) - id: add_project - with: - project-url: "https://github.com/orgs/matrix-org/projects/67" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0048-Explicitly-enable-pypy-for-cibuildwheel-18417.patch b/packages/overlays/matrix-synapse/patches/0004-Explicitly-enable-pypy-for-cibuildwheel-18417.patch
index bd1678c..a85de14 100644
--- a/packages/overlays/matrix-synapse/patches/0048-Explicitly-enable-pypy-for-cibuildwheel-18417.patch
+++ b/packages/overlays/matrix-synapse/patches/0004-Explicitly-enable-pypy-for-cibuildwheel-18417.patch
@@ -1,7 +1,7 @@
 From 2db54c88ff54a5377d96088c23ac1f4dfef8faf3 Mon Sep 17 00:00:00 2001
 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
 Date: Tue, 13 May 2025 15:19:30 +0100
-Subject: [PATCH 48/74] Explicitly enable pypy for cibuildwheel (#18417)
+Subject: [PATCH 04/34] Explicitly enable pypy for cibuildwheel (#18417)
 
 ---
  changelog.d/18417.misc | 1 +
diff --git a/packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch b/packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch
deleted file mode 100644
index e9d974e..0000000 --- a/packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch +++ /dev/null
@@ -1,26 +0,0 @@ -From a87981f673fe944690202cc4067a02f0c666eee4 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:06:13 +0100 -Subject: [PATCH 05/74] Bump actions/download-artifact from 4.2.1 to 4.3.0 - (#18364) - ---- - .github/workflows/release-artifacts.yml | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml -index 573264229f..e0b8f2faf4 100644 ---- a/.github/workflows/release-artifacts.yml -+++ b/.github/workflows/release-artifacts.yml -@@ -203,7 +203,7 @@ jobs: - runs-on: ubuntu-latest - steps: - - name: Download all workflow run artifacts -- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1 -+ uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - - name: Build a tarball for the debs - # We need to merge all the debs uploads into one folder, then compress - # that. --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0049-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch b/packages/overlays/matrix-synapse/patches/0005-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch
index eb5a706..5b1ec8e 100644
--- a/packages/overlays/matrix-synapse/patches/0049-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch
+++ b/packages/overlays/matrix-synapse/patches/0005-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch
@@ -1,7 +1,7 @@
 From 6e910e2b2c5cef393473dcc6bf957a8671a1186e Mon Sep 17 00:00:00 2001
 From: Eric Eastwood <erice@element.io>
 Date: Tue, 13 May 2025 10:22:15 -0500
-Subject: [PATCH 49/74] Fix a couple type annotations in the
+Subject: [PATCH 05/34] Fix a couple type annotations in the
  `RootConfig`/`Config` (#18409)
 
 Fix a couple type annotations in the `RootConfig`/`Config`. Discovered
diff --git a/packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch b/packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch
deleted file mode 100644
index ce2b729..0000000 --- a/packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch +++ /dev/null
@@ -1,25 +0,0 @@ -From 4c958c679a9c20930adfa25e64fc237fbf526591 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:06:26 +0100 -Subject: [PATCH 06/74] Bump stefanzweifel/git-auto-commit-action from 5.1.0 to - 5.2.0 (#18354) - ---- - .github/workflows/fix_lint.yaml | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml -index fe699c1b2f..923e96a624 100644 ---- a/.github/workflows/fix_lint.yaml -+++ b/.github/workflows/fix_lint.yaml -@@ -44,6 +44,6 @@ jobs: - - run: cargo fmt - continue-on-error: true - -- - uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0 -+ - uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0 - with: - commit_message: "Attempt to fix linting" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0050-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch b/packages/overlays/matrix-synapse/patches/0006-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch
index 2530521..39fc2fe 100644
--- a/packages/overlays/matrix-synapse/patches/0050-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch
+++ b/packages/overlays/matrix-synapse/patches/0006-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch
@@ -1,7 +1,7 @@
 From a3bbd7eeabee7c6b229e95e0e04af5b430ea32db Mon Sep 17 00:00:00 2001
 From: Eric Eastwood <erice@element.io>
 Date: Tue, 13 May 2025 10:40:49 -0500
-Subject: [PATCH 50/74] Explain why we `flush_buffer()` for Python `print(...)`
+Subject: [PATCH 06/34] Explain why we `flush_buffer()` for Python `print(...)`
  output (#18420)
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
diff --git a/packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch b/packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch
deleted file mode 100644
index 7827e2a..0000000 --- a/packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch +++ /dev/null
@@ -1,28 +0,0 @@ -From 39e17856a37570bda2fa912c6751e31bad6f970b Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:06:36 +0100 -Subject: [PATCH 07/74] Bump anyhow from 1.0.97 to 1.0.98 (#18336) - ---- - Cargo.lock | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/Cargo.lock b/Cargo.lock -index 1b17e9910a..e1c381e273 100644 ---- a/Cargo.lock -+++ b/Cargo.lock -@@ -13,9 +13,9 @@ dependencies = [ - - [[package]] - name = "anyhow" --version = "1.0.97" -+version = "1.0.98" - source = "registry+https://github.com/rust-lang/crates.io-index" --checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" -+checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" - - [[package]] - name = "arc-swap" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0051-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch b/packages/overlays/matrix-synapse/patches/0007-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch
index c542c7c..3e21694 100644
--- a/packages/overlays/matrix-synapse/patches/0051-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch
+++ b/packages/overlays/matrix-synapse/patches/0007-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch
@@ -1,7 +1,7 @@
 From 194b923a6e625af6ca90bbbdc1f8a85a9215797e Mon Sep 17 00:00:00 2001
 From: Kim Brose <2803622+HarHarLinks@users.noreply.github.com>
 Date: Wed, 14 May 2025 10:36:54 +0000
-Subject: [PATCH 51/74] Fix room_list_publication_rules docs for v1.126.0
+Subject: [PATCH 07/34] Fix room_list_publication_rules docs for v1.126.0
  (#18286)
 
 Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
diff --git a/packages/overlays/matrix-synapse/patches/0052-Add-option-to-allow-registrations-that-begin-with-_-.patch b/packages/overlays/matrix-synapse/patches/0008-Add-option-to-allow-registrations-that-begin-with-_-.patch
index 97ef21c..fa37022 100644
--- a/packages/overlays/matrix-synapse/patches/0052-Add-option-to-allow-registrations-that-begin-with-_-.patch
+++ b/packages/overlays/matrix-synapse/patches/0008-Add-option-to-allow-registrations-that-begin-with-_-.patch
@@ -1,7 +1,7 @@
 From 44ae5362fd952dbb209f4b52ee9c96641163f032 Mon Sep 17 00:00:00 2001
 From: _ <x5f@fastmail.com>
 Date: Thu, 15 May 2025 04:31:52 -0700
-Subject: [PATCH 52/74] Add option to allow registrations that begin with '_'
+Subject: [PATCH 08/34] Add option to allow registrations that begin with '_'
  (#18262)
 
 Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
diff --git a/packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch b/packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch
deleted file mode 100644
index 5c5ab88..0000000 --- a/packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch +++ /dev/null
@@ -1,28 +0,0 @@ -From 2ef782462011044718b0b3848f0cd33e5b2e1827 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:07:06 +0100 -Subject: [PATCH 08/74] Bump pyo3-log from 0.12.2 to 0.12.3 (#18317) - ---- - Cargo.lock | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/Cargo.lock b/Cargo.lock -index e1c381e273..822eb2cdba 100644 ---- a/Cargo.lock -+++ b/Cargo.lock -@@ -316,9 +316,9 @@ dependencies = [ - - [[package]] - name = "pyo3-log" --version = "0.12.2" -+version = "0.12.3" - source = "registry+https://github.com/rust-lang/crates.io-index" --checksum = "4b78e4983ba15bc62833a0e0941d965bc03690163f1127864f1408db25063466" -+checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4" - dependencies = [ - "arc-swap", - "log", --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch b/packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch deleted file mode 100644
index 1416c13..0000000 --- a/packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch +++ /dev/null
@@ -1,92 +0,0 @@ -From b0795d0cb670b2e8e66839e729cce42eb681832e Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:07:15 +0100 -Subject: [PATCH 09/74] Bump types-psycopg2 from 2.9.21.20250121 to - 2.9.21.20250318 (#18316) - -Bumps [types-psycopg2](https://github.com/python/typeshed) from -2.9.21.20250121 to 2.9.21.20250318. -<details> -<summary>Commits</summary> -<ul> -<li>See full diff in <a -href="https://github.com/python/typeshed/commits">compare view</a></li> -</ul> -</details> -<br /> - - -[![Dependabot compatibility -score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=types-psycopg2&package-manager=pip&previous-version=2.9.21.20250121&new-version=2.9.21.20250318)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) - -Dependabot will resolve any conflicts with this PR as long as you don't -alter it yourself. You can also trigger a rebase manually by commenting -`@dependabot rebase`. - -[//]: # (dependabot-automerge-start) -[//]: # (dependabot-automerge-end) - ---- - -<details> -<summary>Dependabot commands and options</summary> -<br /> - -You can trigger Dependabot actions by commenting on this PR: -- `@dependabot rebase` will rebase this PR -- `@dependabot recreate` will recreate this PR, overwriting any edits -that have been made to it -- `@dependabot merge` will merge this PR after your CI passes on it -- `@dependabot squash and merge` will squash and merge this PR after -your CI passes on it -- `@dependabot cancel merge` will cancel a previously requested merge -and block automerging -- `@dependabot reopen` will reopen this PR if it is closed -- `@dependabot close` will close this PR and stop Dependabot recreating -it. 
You can achieve the same result by closing it manually -- `@dependabot show <dependency name> ignore conditions` will show all -of the ignore conditions of the specified dependency -- `@dependabot ignore this major version` will close this PR and stop -Dependabot creating any more for this major version (unless you reopen -the PR or upgrade to it yourself) -- `@dependabot ignore this minor version` will close this PR and stop -Dependabot creating any more for this minor version (unless you reopen -the PR or upgrade to it yourself) -- `@dependabot ignore this dependency` will close this PR and stop -Dependabot creating any more for this dependency (unless you reopen the -PR or upgrade to it yourself) - - -</details> - -Signed-off-by: dependabot[bot] <support@github.com> -Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> ---- - poetry.lock | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index 2bf511e8a6..51e73bae54 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -3007,14 +3007,14 @@ files = [ - - [[package]] - name = "types-psycopg2" --version = "2.9.21.20250121" -+version = "2.9.21.20250318" - description = "Typing stubs for psycopg2" - optional = false - python-versions = ">=3.9" - groups = ["dev"] - files = [ -- {file = "types_psycopg2-2.9.21.20250121-py3-none-any.whl", hash = "sha256:b890dc6f5a08b6433f0ff73a4ec9a834deedad3e914f2a4a6fd43df021f745f1"}, -- {file = "types_psycopg2-2.9.21.20250121.tar.gz", hash = "sha256:2b0e2cd0f3747af1ae25a7027898716d80209604770ef3cbf350fe055b9c349b"}, -+ {file = "types_psycopg2-2.9.21.20250318-py3-none-any.whl", hash = "sha256:7296d111ad950bbd2fc979a1ab0572acae69047f922280e77db657c00d2c79c0"}, -+ {file = "types_psycopg2-2.9.21.20250318.tar.gz", hash = "sha256:eb6eac5bfb16adfd5f16b818918b9e26a40ede147e0f2bbffdf53a6ef7025a87"}, - ] - - [[package]] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0054-remove-room-without-listeners-from-Notifier.room_to_.patch b/packages/overlays/matrix-synapse/patches/0009-remove-room-without-listeners-from-Notifier.room_to_.patch
index 8c90d7b..dfced13 100644
--- a/packages/overlays/matrix-synapse/patches/0054-remove-room-without-listeners-from-Notifier.room_to_.patch
+++ b/packages/overlays/matrix-synapse/patches/0009-remove-room-without-listeners-from-Notifier.room_to_.patch
@@ -1,7 +1,7 @@
 From 0afdc0fc7ffe2cb7a2fa6d47f22b685cbacc7223 Mon Sep 17 00:00:00 2001
 From: Stanislav Kazantsev <stas.kazancev54@gmail.com>
 Date: Thu, 15 May 2025 23:18:17 +0600
-Subject: [PATCH 54/74] remove room without listeners from
+Subject: [PATCH 09/34] remove room without listeners from
  Notifier.room_to_user_streams (#18380)
 
 Co-authored-by: Andrew Morgan <andrew@amorgan.xyz>
diff --git a/packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch b/packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch
deleted file mode 100644
index 4ca3c69..0000000 --- a/packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch +++ /dev/null
@@ -1,39 +0,0 @@ -From 7346760aed018eaf46a0bff2d0459b39881d2af5 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:07:33 +0100 -Subject: [PATCH 10/74] Bump pyopenssl from 24.3.0 to 25.0.0 (#18315) - ---- - poetry.lock | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index 51e73bae54..c6a6ce9826 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -2053,18 +2053,19 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] - - [[package]] - name = "pyopenssl" --version = "24.3.0" -+version = "25.0.0" - description = "Python wrapper module around the OpenSSL library" - optional = false - python-versions = ">=3.7" - groups = ["main"] - files = [ -- {file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"}, -- {file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"}, -+ {file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"}, -+ {file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"}, - ] - - [package.dependencies] - cryptography = ">=41.0.5,<45" -+typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""} - - [package.extras] - docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0055-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch b/packages/overlays/matrix-synapse/patches/0010-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch
index be91744..203103d 100644
--- a/packages/overlays/matrix-synapse/patches/0055-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch
+++ b/packages/overlays/matrix-synapse/patches/0010-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch
@@ -1,7 +1,7 @@
 From 74e2f028bbcaeb2a572d03e66334f3c671bffae2 Mon Sep 17 00:00:00 2001
 From: Shay <hillerys@element.io>
 Date: Mon, 19 May 2025 01:48:46 -0700
-Subject: [PATCH 55/74] Fix admin redaction endpoint not redacting encrypted
+Subject: [PATCH 10/34] Fix admin redaction endpoint not redacting encrypted
  messages (#18434)
 
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0056-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch b/packages/overlays/matrix-synapse/patches/0011-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch
index 55c5fbd..bb31453 100644
--- a/packages/overlays/matrix-synapse/patches/0056-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch
+++ b/packages/overlays/matrix-synapse/patches/0011-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch
@@ -1,7 +1,7 @@
 From 078cefd014806a67249ddb59b5976c7e93227f37 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Mon, 19 May 2025 09:51:08 +0100
-Subject: [PATCH 56/74] Bump actions/setup-python from 5.5.0 to 5.6.0 (#18398)
+Subject: [PATCH 11/34] Bump actions/setup-python from 5.5.0 to 5.6.0 (#18398)
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
diff --git a/packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch b/packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch
deleted file mode 100644
index 57a0e9c..0000000 --- a/packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch +++ /dev/null
@@ -1,35 +0,0 @@ -From 75832f25b08a058d01acde334033f76edc131ad5 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:07:49 +0100 -Subject: [PATCH 11/74] Bump types-jsonschema from 4.23.0.20240813 to - 4.23.0.20241208 (#18305) - ---- - poetry.lock | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index c6a6ce9826..abd97a785b 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -2957,14 +2957,14 @@ files = [ - - [[package]] - name = "types-jsonschema" --version = "4.23.0.20240813" -+version = "4.23.0.20241208" - description = "Typing stubs for jsonschema" - optional = false - python-versions = ">=3.8" - groups = ["dev"] - files = [ -- {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"}, -- {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"}, -+ {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"}, -+ {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"}, - ] - - [package.dependencies] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0057-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch b/packages/overlays/matrix-synapse/patches/0012-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch
index ffb8bd3..6b1a48c 100644
--- a/packages/overlays/matrix-synapse/patches/0057-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch
+++ b/packages/overlays/matrix-synapse/patches/0012-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch
@@ -1,7 +1,7 @@
 From 7d4c3b64e34571f3ace10fa7e33d07853bf16d67 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Mon, 19 May 2025 09:51:52 +0100
-Subject: [PATCH 57/74] Bump docker/build-push-action from 6.15.0 to 6.16.0
+Subject: [PATCH 12/34] Bump docker/build-push-action from 6.15.0 to 6.16.0
  (#18397)
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
diff --git a/packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch b/packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch
deleted file mode 100644
index 6206391..0000000 --- a/packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch +++ /dev/null
@@ -1,25 +0,0 @@ -From 0384fd72eeaa77dd56b52f38f7b339b95babe8dd Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 29 Apr 2025 10:08:20 +0100 -Subject: [PATCH 12/74] Bump softprops/action-gh-release from 1 to 2 (#18264) - ---- - .github/workflows/release-artifacts.yml | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml -index e0b8f2faf4..e03c9d2bd5 100644 ---- a/.github/workflows/release-artifacts.yml -+++ b/.github/workflows/release-artifacts.yml -@@ -213,7 +213,7 @@ jobs: - tar -cvJf debs.tar.xz debs - - name: Attach to release - # Pinned to work around https://github.com/softprops/action-gh-release/issues/445 -- uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15 -+ uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0058-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch b/packages/overlays/matrix-synapse/patches/0013-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch
index aa7b806..ce18e13 100644
--- a/packages/overlays/matrix-synapse/patches/0058-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch
+++ b/packages/overlays/matrix-synapse/patches/0013-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch
@@ -1,7 +1,7 @@
 From fa4a00a2da753a52dde582c0f56e3ea6567bd53b Mon Sep 17 00:00:00 2001
 From: Erik Johnston <erikj@element.io>
 Date: Mon, 19 May 2025 11:52:05 +0100
-Subject: [PATCH 58/74] Check for `CREATE/DROP INDEX` in schema deltas (#18440)
+Subject: [PATCH 13/34] Check for `CREATE/DROP INDEX` in schema deltas (#18440)
 
 As these should be background updates.
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch b/packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch
deleted file mode 100644
index fffe68d..0000000 --- a/packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch +++ /dev/null
@@ -1,128 +0,0 @@ -From e47de2b32de6183fd0cb91dda9b232de5d263345 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Tue, 29 Apr 2025 14:08:11 +0100 -Subject: [PATCH 13/74] Do not retry push during backoff period (#18363) - -This fixes a bug where if a pusher gets told about a new event to push -it will ignore the backoff and immediately retry sending any pending -push. ---- - changelog.d/18363.bugfix | 1 + - synapse/push/httppusher.py | 6 +++ - tests/push/test_http.py | 78 ++++++++++++++++++++++++++++++++++++++ - 3 files changed, 85 insertions(+) - create mode 100644 changelog.d/18363.bugfix - -diff --git a/changelog.d/18363.bugfix b/changelog.d/18363.bugfix -new file mode 100644 -index 0000000000..bfa336d52f ---- /dev/null -+++ b/changelog.d/18363.bugfix -@@ -0,0 +1 @@ -+Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. -diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py -index 69790ecab5..7df8a128c9 100644 ---- a/synapse/push/httppusher.py -+++ b/synapse/push/httppusher.py -@@ -205,6 +205,12 @@ class HttpPusher(Pusher): - if self._is_processing: - return - -+ # Check if we are trying, but failing, to contact the pusher. If so, we -+ # don't try and start processing immediately and instead wait for the -+ # retry loop to try again later (which is controlled by the timer). -+ if self.failing_since and self.timed_call and self.timed_call.active(): -+ return -+ - run_as_background_process("httppush.process", self._process) - - async def _process(self) -> None: -diff --git a/tests/push/test_http.py b/tests/push/test_http.py -index 5c235bbe53..b42fd284b6 100644 ---- a/tests/push/test_http.py -+++ b/tests/push/test_http.py -@@ -1167,3 +1167,81 @@ class HTTPPusherTests(HomeserverTestCase): - self.assertEqual( - self.push_attempts[0][2]["notification"]["counts"]["unread"], 1 - ) -+ -+ def test_push_backoff(self) -> None: -+ """ -+ The HTTP pusher will backoff correctly if it fails to contact the pusher. 
-+ """ -+ -+ # Register the user who gets notified -+ user_id = self.register_user("user", "pass") -+ access_token = self.login("user", "pass") -+ -+ # Register the user who sends the message -+ other_user_id = self.register_user("otheruser", "pass") -+ other_access_token = self.login("otheruser", "pass") -+ -+ # Register the pusher -+ user_tuple = self.get_success( -+ self.hs.get_datastores().main.get_user_by_access_token(access_token) -+ ) -+ assert user_tuple is not None -+ device_id = user_tuple.device_id -+ -+ self.get_success( -+ self.hs.get_pusherpool().add_or_update_pusher( -+ user_id=user_id, -+ device_id=device_id, -+ kind="http", -+ app_id="m.http", -+ app_display_name="HTTP Push Notifications", -+ device_display_name="pushy push", -+ pushkey="a@example.com", -+ lang=None, -+ data={"url": "http://example.com/_matrix/push/v1/notify"}, -+ ) -+ ) -+ -+ # Create a room with the other user -+ room = self.helper.create_room_as(user_id, tok=access_token) -+ self.helper.join(room=room, user=other_user_id, tok=other_access_token) -+ -+ # The other user sends some messages -+ self.helper.send(room, body="Message 1", tok=other_access_token) -+ -+ # One push was attempted to be sent -+ self.assertEqual(len(self.push_attempts), 1) -+ self.assertEqual( -+ self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify" -+ ) -+ self.assertEqual( -+ self.push_attempts[0][2]["notification"]["content"]["body"], "Message 1" -+ ) -+ self.push_attempts[0][0].callback({}) -+ self.pump() -+ -+ # Send another message, this time it fails -+ self.helper.send(room, body="Message 2", tok=other_access_token) -+ self.assertEqual(len(self.push_attempts), 2) -+ self.push_attempts[1][0].errback(Exception("couldn't connect")) -+ self.pump() -+ -+ # Sending yet another message doesn't trigger a push immediately -+ self.helper.send(room, body="Message 3", tok=other_access_token) -+ self.pump() -+ self.assertEqual(len(self.push_attempts), 2) -+ -+ # .. but waiting for a bit will cause more pushes -+ self.reactor.advance(10) -+ self.assertEqual(len(self.push_attempts), 3) -+ self.assertEqual( -+ self.push_attempts[2][2]["notification"]["content"]["body"], "Message 2" -+ ) -+ self.push_attempts[2][0].callback({}) -+ self.pump() -+ -+ self.assertEqual(len(self.push_attempts), 4) -+ self.assertEqual( -+ self.push_attempts[3][2]["notification"]["content"]["body"], "Message 3" -+ ) -+ self.push_attempts[3][0].callback({}) --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0059-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch b/packages/overlays/matrix-synapse/patches/0014-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch
index 1605034..57dcac6 100644
--- a/packages/overlays/matrix-synapse/patches/0059-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch
+++ b/packages/overlays/matrix-synapse/patches/0014-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch
@@ -1,7 +1,7 @@
 From b3b24c69fcbdb67de04b0388aa104d43780ba88f Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Mon, 19 May 2025 13:04:15 +0100
-Subject: [PATCH 59/74] Bump pyo3-log from 0.12.3 to 0.12.4 (#18453)
+Subject: [PATCH 14/34] Bump pyo3-log from 0.12.3 to 0.12.4 (#18453)
 
 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch b/packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch
deleted file mode 100644
index e6c87b2..0000000
--- a/packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch
+++ /dev/null
@@ -1,123 +0,0 @@ -From ad140130cc3db503de3fd15aa2923417f46b700b Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Tue, 29 Apr 2025 14:08:22 +0100 -Subject: [PATCH 14/74] Slight performance increase when using the ratelimiter - (#18369) - -See the commits. ---- - changelog.d/18369.misc | 1 + - synapse/api/ratelimiting.py | 19 ++++++++----------- - synapse/rest/client/sync.py | 7 +++---- - tests/api/test_ratelimiting.py | 4 +--- - 4 files changed, 13 insertions(+), 18 deletions(-) - create mode 100644 changelog.d/18369.misc - -diff --git a/changelog.d/18369.misc b/changelog.d/18369.misc -new file mode 100644 -index 0000000000..f4c0e5f006 ---- /dev/null -+++ b/changelog.d/18369.misc -@@ -0,0 +1 @@ -+Slight performance increase when using the ratelimiter. -diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py -index 229329a5ae..8665b3b765 100644 ---- a/synapse/api/ratelimiting.py -+++ b/synapse/api/ratelimiting.py -@@ -20,8 +20,7 @@ - # - # - --from collections import OrderedDict --from typing import Hashable, Optional, Tuple -+from typing import Dict, Hashable, Optional, Tuple - - from synapse.api.errors import LimitExceededError - from synapse.config.ratelimiting import RatelimitSettings -@@ -80,12 +79,14 @@ class Ratelimiter: - self.store = store - self._limiter_name = cfg.key - -- # An ordered dictionary representing the token buckets tracked by this rate -+ # A dictionary representing the token buckets tracked by this rate - # limiter. Each entry maps a key of arbitrary type to a tuple representing: - # * The number of tokens currently in the bucket, - # * The time point when the bucket was last completely empty, and - # * The rate_hz (leak rate) of this particular bucket. -- self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict() -+ self.actions: Dict[Hashable, Tuple[float, float, float]] = {} -+ -+ self.clock.looping_call(self._prune_message_counts, 60 * 1000) - - def _get_key( - self, requester: Optional[Requester], key: Optional[Hashable] -@@ -169,9 +170,6 @@ class Ratelimiter: - rate_hz = rate_hz if rate_hz is not None else self.rate_hz - burst_count = burst_count if burst_count is not None else self.burst_count - -- # Remove any expired entries -- self._prune_message_counts(time_now_s) -- - # Check if there is an existing count entry for this key - action_count, time_start, _ = self._get_action_counts(key, time_now_s) - -@@ -246,13 +244,12 @@ class Ratelimiter: - action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s) - self.actions[key] = (action_count + n_actions, time_start, rate_hz) - -- def _prune_message_counts(self, time_now_s: float) -> None: -+ def _prune_message_counts(self) -> None: - """Remove message count entries that have not exceeded their defined - rate_hz limit -- -- Args: -- time_now_s: The current time - """ -+ time_now_s = self.clock.time() -+ - # We create a copy of the key list here as the dictionary is modified during - # the loop - for key in list(self.actions.keys()): -diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py -index 4fb9c0c8e7..bac02122d0 100644 ---- a/synapse/rest/client/sync.py -+++ b/synapse/rest/client/sync.py -@@ -24,7 +24,7 @@ from collections import defaultdict - from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union - - from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState --from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError -+from 
synapse.api.errors import Codes, StoreError, SynapseError - from synapse.api.filtering import FilterCollection - from synapse.api.presence import UserPresenceState - from synapse.api.ratelimiting import Ratelimiter -@@ -248,9 +248,8 @@ class SyncRestServlet(RestServlet): - await self._server_notices_sender.on_user_syncing(user.to_string()) - - # ignore the presence update if the ratelimit is exceeded but do not pause the request -- try: -- await self._presence_per_user_limiter.ratelimit(requester, pause=0.0) -- except LimitExceededError: -+ allowed, _ = await self._presence_per_user_limiter.can_do_action(requester) -+ if not allowed: - affect_presence = False - logger.debug("User set_presence ratelimit exceeded; ignoring it.") - else: -diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py -index a59e168db1..1a1cbde74e 100644 ---- a/tests/api/test_ratelimiting.py -+++ b/tests/api/test_ratelimiting.py -@@ -220,9 +220,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): - - self.assertIn("test_id_1", limiter.actions) - -- self.get_success_or_raise( -- limiter.can_do_action(None, key="test_id_2", _time_now_s=10) -- ) -+ self.reactor.advance(60) - - self.assertNotIn("test_id_1", limiter.actions) - --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0060-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch b/packages/overlays/matrix-synapse/patches/0015-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch
index a24ca1f..a0dda80 100644
--- a/packages/overlays/matrix-synapse/patches/0060-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch
+++ b/packages/overlays/matrix-synapse/patches/0015-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch
@@ -1,7 +1,7 @@
 From cd1a3ac584d9a353e24e42354ae71028654f7f61 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Mon, 19 May 2025 13:06:11 +0100
-Subject: [PATCH 60/74] Bump authlib from 1.5.1 to 1.5.2 (#18452)
+Subject: [PATCH 15/34] Bump authlib from 1.5.1 to 1.5.2 (#18452)
 
 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch b/packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch
deleted file mode 100644
index 1e2bdec..0000000
--- a/packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch
+++ /dev/null
@@ -1,116 +0,0 @@ -From 4eaab31757f096a04f4278d722cdef1eb92a1743 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Tue, 29 Apr 2025 14:08:32 +0100 -Subject: [PATCH 15/74] Minor performance improvements to notifier/replication - (#18367) - -These are some improvements to `on_new_event` which is a hot path. Not -sure how much this will save, but maybe like ~5%? - -Possibly easier to review commit-by-commit ---- - changelog.d/18367.misc | 1 + - synapse/notifier.py | 61 +++++++++++++++++++++--------------------- - 2 files changed, 32 insertions(+), 30 deletions(-) - create mode 100644 changelog.d/18367.misc - -diff --git a/changelog.d/18367.misc b/changelog.d/18367.misc -new file mode 100644 -index 0000000000..2e8b897fa6 ---- /dev/null -+++ b/changelog.d/18367.misc -@@ -0,0 +1 @@ -+Minor performance improvements to the notifier. -diff --git a/synapse/notifier.py b/synapse/notifier.py -index 88f531182a..1914d0c914 100644 ---- a/synapse/notifier.py -+++ b/synapse/notifier.py -@@ -66,7 +66,6 @@ from synapse.types import ( - from synapse.util.async_helpers import ( - timeout_deferred, - ) --from synapse.util.metrics import Measure - from synapse.util.stringutils import shortstr - from synapse.visibility import filter_events_for_client - -@@ -520,20 +519,22 @@ class Notifier: - users = users or [] - rooms = rooms or [] - -- with Measure(self.clock, "on_new_event"): -- user_streams: Set[_NotifierUserStream] = set() -- -- log_kv( -- { -- "waking_up_explicit_users": len(users), -- "waking_up_explicit_rooms": len(rooms), -- "users": shortstr(users), -- "rooms": shortstr(rooms), -- "stream": stream_key, -- "stream_id": new_token, -- } -- ) -+ user_streams: Set[_NotifierUserStream] = set() -+ -+ log_kv( -+ { -+ "waking_up_explicit_users": len(users), -+ "waking_up_explicit_rooms": len(rooms), -+ "users": shortstr(users), -+ "rooms": shortstr(rooms), -+ "stream": stream_key, -+ "stream_id": new_token, -+ } -+ ) - -+ # Only calculate which user streams to wake up if there are, in fact, -+ # any user streams registered. -+ if self.user_to_user_stream or self.room_to_user_streams: - for user in users: - user_stream = self.user_to_user_stream.get(str(user)) - if user_stream is not None: -@@ -565,25 +566,25 @@ class Notifier: - # We resolve all these deferreds in one go so that we only need to - # call `PreserveLoggingContext` once, as it has a bunch of overhead - # (to calculate performance stats) -- with PreserveLoggingContext(): -- for listener in listeners: -- listener.callback(current_token) -+ if listeners: -+ with PreserveLoggingContext(): -+ for listener in listeners: -+ listener.callback(current_token) - -- users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams)) -+ if user_streams: -+ users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams)) - -- self.notify_replication() -+ self.notify_replication() - -- # Notify appservices. -- try: -- self.appservice_handler.notify_interested_services_ephemeral( -- stream_key, -- new_token, -- users, -- ) -- except Exception: -- logger.exception( -- "Error notifying application services of ephemeral events" -- ) -+ # Notify appservices. 
-+ try: -+ self.appservice_handler.notify_interested_services_ephemeral( -+ stream_key, -+ new_token, -+ users, -+ ) -+ except Exception: -+ logger.exception("Error notifying application services of ephemeral events") - - def on_new_replication_data(self) -> None: - """Used to inform replication listeners that something has happened --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0061-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch b/packages/overlays/matrix-synapse/patches/0016-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch
index 782024f..f78bb13 100644
--- a/packages/overlays/matrix-synapse/patches/0061-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch
+++ b/packages/overlays/matrix-synapse/patches/0016-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch
@@ -1,7 +1,7 @@
 From afeb0e01c552216d0d987cd504aab440b07bdb10 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Mon, 19 May 2025 13:06:45 +0100
-Subject: [PATCH 61/74] Bump pyopenssl from 25.0.0 to 25.1.0 (#18450)
+Subject: [PATCH 16/34] Bump pyopenssl from 25.0.0 to 25.1.0 (#18450)
 
 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch b/packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch
deleted file mode 100644
index c92436a..0000000
--- a/packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch
+++ /dev/null
@@ -1,34 +0,0 @@ -From f79811ed80bebaa5b187637af6d16d413b07166e Mon Sep 17 00:00:00 2001 -From: Kim Brose <2803622+HarHarLinks@users.noreply.github.com> -Date: Wed, 30 Apr 2025 15:27:08 +0200 -Subject: [PATCH 16/74] Fix typo in docs about `push` (#18320) - ---- - changelog.d/18320.doc | 1 + - docs/usage/configuration/config_documentation.md | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - create mode 100644 changelog.d/18320.doc - -diff --git a/changelog.d/18320.doc b/changelog.d/18320.doc -new file mode 100644 -index 0000000000..d84c279940 ---- /dev/null -+++ b/changelog.d/18320.doc -@@ -0,0 +1 @@ -+Fix typo in docs about the `push` config option. Contributed by @HarHarLinks. -diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md -index 73fd9622ce..19dc9dd356 100644 ---- a/docs/usage/configuration/config_documentation.md -+++ b/docs/usage/configuration/config_documentation.md -@@ -4018,7 +4018,7 @@ This option has a number of sub-options. They are as follows: - * `include_content`: Clients requesting push notifications can either have the body of - the message sent in the notification poke along with other details - like the sender, or just the event ID and room ID (`event_id_only`). -- If clients choose the to have the body sent, this option controls whether the -+ If clients choose to have the body sent, this option controls whether the - notification request includes the content of the event (other details - like the sender are still included). If `event_id_only` is enabled, it - has no effect. --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0062-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch b/packages/overlays/matrix-synapse/patches/0017-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch
index 535f783..46ea888 100644
--- a/packages/overlays/matrix-synapse/patches/0062-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch
+++ b/packages/overlays/matrix-synapse/patches/0017-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch
@@ -1,7 +1,7 @@
 From 17e6b32966670550c5fb4f232b390dd25ec77759 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Mon, 19 May 2025 13:07:24 +0100
-Subject: [PATCH 62/74] Bump docker/build-push-action from 6.16.0 to 6.17.0
+Subject: [PATCH 17/34] Bump docker/build-push-action from 6.16.0 to 6.17.0
  (#18449)
 
 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
diff --git a/packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch b/packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch
deleted file mode 100644
index 6a0c1a3..0000000
--- a/packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch
+++ /dev/null
@@ -1,138 +0,0 @@ -From 4097ada89fefe12e7ec6d2b7a3bfbc61e64e14a0 Mon Sep 17 00:00:00 2001 -From: Andrew Ferrazzutti <andrewf@element.io> -Date: Wed, 30 Apr 2025 09:54:30 -0400 -Subject: [PATCH 17/74] Optimize `Dockerfile-workers` (#18292) - -- Use a `uv:python` image for the first build layer, to reduce the -number of intermediate images required, as the -main Dockerfile uses that image already -- Use a cache mount for `apt` commands -- Skip a pointless install of `redis-server`, since the redis Docker -image is copied from instead -- Move some RUN steps out of the final image layer & into the build -layer - -Depends on https://github.com/element-hq/synapse/pull/18275 - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [x] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [x] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) ---- - changelog.d/18292.docker | 1 + - docker/Dockerfile-workers | 50 +++++++++++++++++++++------------------ - 2 files changed, 28 insertions(+), 23 deletions(-) - create mode 100644 changelog.d/18292.docker - -diff --git a/changelog.d/18292.docker b/changelog.d/18292.docker -new file mode 100644 -index 0000000000..cdb95b369b ---- /dev/null -+++ b/changelog.d/18292.docker -@@ -0,0 +1 @@ -+Optimize the build of the workers image. -diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers -index dd0bf59994..a7f576184d 100644 ---- a/docker/Dockerfile-workers -+++ b/docker/Dockerfile-workers -@@ -3,18 +3,37 @@ - ARG SYNAPSE_VERSION=latest - ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION - ARG DEBIAN_VERSION=bookworm -+ARG PYTHON_VERSION=3.12 - --# first of all, we create a base image with an nginx which we can copy into the -+# first of all, we create a base image with dependencies which we can copy into the - # target image. For repeated rebuilds, this is much faster than apt installing - # each time. - --FROM docker.io/library/debian:${DEBIAN_VERSION}-slim AS deps_base -+FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base -+ -+ # Tell apt to keep downloaded package files, as we're using cache mounts. 
-+ RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache -+ - RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && \ - DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \ -- redis-server nginx-light -+ nginx-light -+ -+ RUN \ -+ # remove default page -+ rm /etc/nginx/sites-enabled/default && \ -+ # have nginx log to stderr/out -+ ln -sf /dev/stdout /var/log/nginx/access.log && \ -+ ln -sf /dev/stderr /var/log/nginx/error.log -+ -+ # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache -+ # (mounted as --mount=type=cache) and the target directory. -+ RUN --mount=type=cache,target=/root/.cache/uv \ -+ uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2 -+ -+ RUN mkdir -p /uv/etc/supervisor/conf.d - - # Similarly, a base to copy the redis server from. - # -@@ -27,31 +46,16 @@ FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base - # now build the final image, based on the the regular Synapse docker image - FROM $FROM - -- # Install supervisord with uv pip instead of apt, to avoid installing a second -- # copy of python. -- # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache -- # (mounted as --mount=type=cache) and the target directory. -- RUN \ -- --mount=type=bind,from=ghcr.io/astral-sh/uv:0.6.8,source=/uv,target=/uv \ -- --mount=type=cache,target=/root/.cache/uv \ -- /uv pip install --link-mode=copy --prefix="/usr/local" supervisor~=4.2 -- -- RUN mkdir -p /etc/supervisor/conf.d -- -- # Copy over redis and nginx -+ # Copy over dependencies - COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin -- -+ COPY --from=deps_base /uv / - COPY --from=deps_base /usr/sbin/nginx /usr/sbin - COPY --from=deps_base /usr/share/nginx /usr/share/nginx - COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx - COPY --from=deps_base /etc/nginx /etc/nginx -- RUN rm /etc/nginx/sites-enabled/default -- RUN mkdir /var/log/nginx /var/lib/nginx -- RUN chown www-data /var/lib/nginx -- -- # have nginx log to stderr/out -- RUN ln -sf /dev/stdout /var/log/nginx/access.log -- RUN ln -sf /dev/stderr /var/log/nginx/error.log -+ COPY --from=deps_base /var/log/nginx /var/log/nginx -+ # chown to allow non-root user to write to http-*-temp-path dirs -+ COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx - - # Copy Synapse worker, nginx and supervisord configuration template files - COPY ./docker/conf-workers/* /conf/ --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0064-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch b/packages/overlays/matrix-synapse/patches/0018-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch
index f216b38..9aba9cb 100644
--- a/packages/overlays/matrix-synapse/patches/0064-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch
+++ b/packages/overlays/matrix-synapse/patches/0018-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch
@@ -1,7 +1,7 @@
 From 1f4ae2f9eb94808f651b683b4650092015ec39e1 Mon Sep 17 00:00:00 2001
 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
 Date: Mon, 19 May 2025 17:50:02 +0100
-Subject: [PATCH 64/74] Allow only requiring a field be present in an SSO
+Subject: [PATCH 18/34] Allow only requiring a field be present in an SSO
  response, rather than specifying a required value (#18454)
 
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch b/packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch
deleted file mode 100644
index 077f5d0..0000000
--- a/packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch
+++ /dev/null
@@ -1,73 +0,0 @@ -From 7563b2a2a316a7b249ef847ddbf5b63064eb1cc2 Mon Sep 17 00:00:00 2001 -From: Andrew Ferrazzutti <andrewf@element.io> -Date: Wed, 30 Apr 2025 10:22:09 -0400 -Subject: [PATCH 18/74] configure_workers_and_start.py: unify python path - (#18291) - -Use absolute path for python in script shebang, and invoke child python -processes with sys.executable. This is consistent with the absolute path -used to invoke python elsewhere (like in the supervisor config). - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [x] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [x] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) - ---------- - -Co-authored-by: Quentin Gliech <quenting@element.io> ---- - changelog.d/18291.docker | 1 + - docker/configure_workers_and_start.py | 4 ++-- - 2 files changed, 3 insertions(+), 2 deletions(-) - create mode 100644 changelog.d/18291.docker - -diff --git a/changelog.d/18291.docker b/changelog.d/18291.docker -new file mode 100644 -index 0000000000..b94c0e80e3 ---- /dev/null -+++ b/changelog.d/18291.docker -@@ -0,0 +1 @@ -+In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. -diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py -index 6d73e8feaa..ff5cff3221 100755 ---- a/docker/configure_workers_and_start.py -+++ b/docker/configure_workers_and_start.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/local/bin/python - # - # This file is licensed under the Affero General Public License (AGPL) version 3. - # -@@ -604,7 +604,7 @@ def generate_base_homeserver_config() -> None: - # start.py already does this for us, so just call that. - # note that this script is copied in in the official, monolith dockerfile - os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT) -- subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True) -+ subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True) - - - def parse_worker_types( --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0065-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch b/packages/overlays/matrix-synapse/patches/0019-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch
index 0e7362d..1905b13 100644
--- a/packages/overlays/matrix-synapse/patches/0065-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch
+++ b/packages/overlays/matrix-synapse/patches/0019-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch
@@ -1,7 +1,7 @@
 From 303c5c4daa6986a91ab4632bd4df0448199b1813 Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Tue, 20 May 2025 12:03:10 +0100
-Subject: [PATCH 65/74] Bump setuptools from 72.1.0 to 78.1.1 (#18461)
+Subject: [PATCH 19/34] Bump setuptools from 72.1.0 to 78.1.1 (#18461)
 
 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch b/packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch
deleted file mode 100644
index a86d43e..0000000
--- a/packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch
+++ /dev/null
@@ -1,100 +0,0 @@ -From 5ab05e7b95a687967fe99be33cb33a9c62fee34b Mon Sep 17 00:00:00 2001 -From: Andrew Ferrazzutti <andrewf@element.io> -Date: Wed, 30 Apr 2025 10:26:08 -0400 -Subject: [PATCH 19/74] docker: use shebangs to invoke generated scripts - (#18295) - -When generating scripts from templates, don't add a leading newline so -that their shebangs may be handled correctly. - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [x] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [x] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) - ---------- - -Co-authored-by: Quentin Gliech <quenting@element.io> ---- - changelog.d/18295.docker | 1 + - docker/Dockerfile-workers | 2 +- - docker/complement/Dockerfile | 2 +- - docker/configure_workers_and_start.py | 5 ++++- - 4 files changed, 7 insertions(+), 3 deletions(-) - create mode 100644 changelog.d/18295.docker - -diff --git a/changelog.d/18295.docker b/changelog.d/18295.docker -new file mode 100644 -index 0000000000..239def1f54 ---- /dev/null -+++ b/changelog.d/18295.docker -@@ -0,0 +1 @@ -+When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. -diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers -index a7f576184d..6d0fc1440b 100644 ---- a/docker/Dockerfile-workers -+++ b/docker/Dockerfile-workers -@@ -74,4 +74,4 @@ FROM $FROM - # Replace the healthcheck with one which checks *all* the workers. The script - # is generated by configure_workers_and_start.py. - HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ -- CMD /bin/sh /healthcheck.sh -+ CMD ["/healthcheck.sh"] -diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile -index dd029c5fbc..6ed084fe5d 100644 ---- a/docker/complement/Dockerfile -+++ b/docker/complement/Dockerfile -@@ -58,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"] - - # Update the healthcheck to have a shorter check interval - HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ -- CMD /bin/sh /healthcheck.sh -+ CMD ["/healthcheck.sh"] -diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py -index ff5cff3221..8f96e57e50 100755 ---- a/docker/configure_workers_and_start.py -+++ b/docker/configure_workers_and_start.py -@@ -376,9 +376,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None: - # - # We use append mode in case the files have already been written to by something else - # (for instance, as part of the instructions in a dockerfile). 
-+ exists = os.path.isfile(dst) - with open(dst, "a") as outfile: - # In case the existing file doesn't end with a newline -- outfile.write("\n") -+ if exists: -+ outfile.write("\n") - - outfile.write(rendered) - -@@ -998,6 +1000,7 @@ def generate_worker_files( - "/healthcheck.sh", - healthcheck_urls=healthcheck_urls, - ) -+ os.chmod("/healthcheck.sh", 0o755) - - # Ensure the logging directory exists - log_dir = data_dir + "/logs" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0066-Update-postgres.md-18445.patch b/packages/overlays/matrix-synapse/patches/0020-Update-postgres.md-18445.patch
index dec69ac..62b2710 100644
--- a/packages/overlays/matrix-synapse/patches/0066-Update-postgres.md-18445.patch
+++ b/packages/overlays/matrix-synapse/patches/0020-Update-postgres.md-18445.patch
@@ -1,7 +1,7 @@
 From a6cb3533db77ebeb6b7ed86fb3d3dd86c046f4a4 Mon Sep 17 00:00:00 2001
 From: Strac Consulting Engineers Pty Ltd <preminik@preminik.com>
 Date: Tue, 20 May 2025 23:31:05 +1000
-Subject: [PATCH 66/74] Update postgres.md (#18445)
+Subject: [PATCH 20/34] Update postgres.md (#18445)
 
 ---
  changelog.d/18445.doc | 1 +
diff --git a/packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch b/packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch
deleted file mode 100644
index 1f45cdc..0000000
--- a/packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch
+++ /dev/null
@@ -1,91 +0,0 @@ -From 7be6c711d4a57f990003613c0b9715e3ac1502cb Mon Sep 17 00:00:00 2001 -From: Andrew Ferrazzutti <andrewf@element.io> -Date: Wed, 30 Apr 2025 11:53:15 -0400 -Subject: [PATCH 20/74] start_for_complement.sh: use more shell builtins - (#18293) - -Avoid calling external tools when shell builtins suffice. - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [x] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [x] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) - ---------- - -Co-authored-by: Quentin Gliech <quenting@element.io> ---- - changelog.d/18293.docker | 1 + - docker/complement/conf/start_for_complement.sh | 11 +++++------ - 2 files changed, 6 insertions(+), 6 deletions(-) - create mode 100644 changelog.d/18293.docker - -diff --git a/changelog.d/18293.docker b/changelog.d/18293.docker -new file mode 100644 -index 0000000000..df47a68bfe ---- /dev/null -+++ b/changelog.d/18293.docker -@@ -0,0 +1 @@ -+In start_for_complement.sh, replace some external program calls with shell builtins. -diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh -index 59b30e2051..a5e06396e2 100755 ---- a/docker/complement/conf/start_for_complement.sh -+++ b/docker/complement/conf/start_for_complement.sh -@@ -9,7 +9,7 @@ echo " Args: $*" - echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" - - function log { -- d=$(date +"%Y-%m-%d %H:%M:%S,%3N") -+ d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ }) - echo "$d $*" - } - -@@ -103,12 +103,11 @@ fi - # Note that both the key and certificate are in PEM format (not DER). - - # First generate a configuration file to set up a Subject Alternative Name. --cat > /conf/server.tls.conf <<EOF -+echo "\ - .include /etc/ssl/openssl.cnf - - [SAN] --subjectAltName=DNS:${SERVER_NAME} --EOF -+subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf - - # Generate an RSA key - openssl genrsa -out /conf/server.tls.key 2048 -@@ -123,8 +122,8 @@ openssl x509 -req -in /conf/server.tls.csr \ - -out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN - - # Assert that we have a Subject Alternative Name in the certificate. --# (grep will exit with 1 here if there isn't a SAN in the certificate.) --openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS: -+# (the test will exit with 1 here if there isn't a SAN in the certificate.) 
-+[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]] - - export SYNAPSE_TLS_CERT=/conf/server.tls.crt - export SYNAPSE_TLS_KEY=/conf/server.tls.key --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch b/packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch deleted file mode 100644
index 6816c77..0000000
--- a/packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch
+++ /dev/null
@@ -1,67 +0,0 @@ -From d59bbd8b6b342d41641fddf99035d38e3939f18c Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Martin=20Lav=C3=A9n?= <laven.martin@gmail.com> -Date: Wed, 30 Apr 2025 18:13:09 +0200 -Subject: [PATCH 21/74] Added Pocket ID to openid.md (#18237) - ---- - changelog.d/18237.doc | 1 + - docs/openid.md | 27 +++++++++++++++++++++++++++ - 2 files changed, 28 insertions(+) - create mode 100644 changelog.d/18237.doc - -diff --git a/changelog.d/18237.doc b/changelog.d/18237.doc -new file mode 100644 -index 0000000000..872f7cab7d ---- /dev/null -+++ b/changelog.d/18237.doc -@@ -0,0 +1 @@ -+Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider. -\ No newline at end of file -diff --git a/docs/openid.md b/docs/openid.md -index 5a3d7e9fba..f86ba189c7 100644 ---- a/docs/openid.md -+++ b/docs/openid.md -@@ -23,6 +23,7 @@ such as [Github][github-idp]. - [auth0]: https://auth0.com/ - [authentik]: https://goauthentik.io/ - [lemonldap]: https://lemonldap-ng.org/ -+[pocket-id]: https://pocket-id.org/ - [okta]: https://www.okta.com/ - [dex-idp]: https://github.com/dexidp/dex - [keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols -@@ -624,6 +625,32 @@ oidc_providers: - - Note that the fields `client_id` and `client_secret` are taken from the CURL response above. - -+### Pocket ID -+ -+[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys. -+1. Go to `OIDC Clients` -+2. Click on `Add OIDC Client` -+3. Add a name, for example `Synapse` -+4. Add `"https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` # Replace `auth.example.org` with your domain -+5. Click on `Save` -+6. Note down your `Client ID` and `Client secret`, these will be used later -+ -+Synapse config: -+ -+```yaml -+oidc_providers: -+ - idp_id: pocket_id -+ idp_name: Pocket ID -+ issuer: "https://auth.example.org/" # Replace with your domain -+ client_id: "your-client-id" # Replace with the "Client ID" you noted down before -+ client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before -+ scopes: ["openid", "profile"] -+ user_mapping_provider: -+ config: -+ localpart_template: "{{ user.preferred_username }}" -+ display_name_template: "{{ user.name }}" -+``` -+ - ### Shibboleth with OIDC Plugin - - [Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities. --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0067-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch b/packages/overlays/matrix-synapse/patches/0021-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch
index ab6c95d..f2c0d5c 100644
--- a/packages/overlays/matrix-synapse/patches/0067-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch
+++ b/packages/overlays/matrix-synapse/patches/0021-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch
@@ -1,7 +1,7 @@
 From 9d43bec3268d9a454fe992f25edfc013a50fb9cc Mon Sep 17 00:00:00 2001
 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
 Date: Tue, 20 May 2025 15:23:30 +0100
-Subject: [PATCH 67/74] Bump ruff from 0.7.3 to 0.11.10 (#18451)
+Subject: [PATCH 21/34] Bump ruff from 0.7.3 to 0.11.10 (#18451)
 
 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 Co-authored-by: Andrew Morgan <andrew@amorgan.xyz>
diff --git a/packages/overlays/matrix-synapse/patches/0070-Add-a-unit-test-for-the-phone-home-stats-18463.patch b/packages/overlays/matrix-synapse/patches/0022-Add-a-unit-test-for-the-phone-home-stats-18463.patch
index 613644c..b00e1c4 100644
--- a/packages/overlays/matrix-synapse/patches/0070-Add-a-unit-test-for-the-phone-home-stats-18463.patch
+++ b/packages/overlays/matrix-synapse/patches/0022-Add-a-unit-test-for-the-phone-home-stats-18463.patch
@@ -1,7 +1,7 @@
 From 4b1d9d5d0e3df7a3151c07f9d42b02dad13a27bf Mon Sep 17 00:00:00 2001
 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
 Date: Tue, 20 May 2025 16:26:45 +0100
-Subject: [PATCH 70/74] Add a unit test for the phone home stats (#18463)
+Subject: [PATCH 22/34] Add a unit test for the phone home stats (#18463)
 
 ---
  changelog.d/18463.misc | 1 +
diff --git a/packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch b/packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch
deleted file mode 100644
index 828f433..0000000
--- a/packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch
+++ /dev/null
@@ -1,69 +0,0 @@ -From 2965c9970c0b2742885dc345f6d70df7d5686423 Mon Sep 17 00:00:00 2001 -From: Sebastian Spaeth <Sebastian@SSpaeth.de> -Date: Thu, 1 May 2025 16:11:59 +0200 -Subject: [PATCH 22/74] docs/workers.md: Add ^/_matrix/federation/v1/event/ to - list of delegatable endpoints (#18377) - ---- - changelog.d/18377.doc | 1 + - docker/configure_workers_and_start.py | 1 + - docs/upgrade.md | 10 ++++++++++ - docs/workers.md | 1 + - 4 files changed, 13 insertions(+) - create mode 100644 changelog.d/18377.doc - -diff --git a/changelog.d/18377.doc b/changelog.d/18377.doc -new file mode 100644 -index 0000000000..ceb2b64e5d ---- /dev/null -+++ b/changelog.d/18377.doc -@@ -0,0 +1 @@ -+Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers. -diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py -index 8f96e57e50..df34d51f77 100755 ---- a/docker/configure_workers_and_start.py -+++ b/docker/configure_workers_and_start.py -@@ -202,6 +202,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { - "app": "synapse.app.generic_worker", - "listener_resources": ["federation"], - "endpoint_patterns": [ -+ "^/_matrix/federation/v1/version$", - "^/_matrix/federation/(v1|v2)/event/", - "^/_matrix/federation/(v1|v2)/state/", - "^/_matrix/federation/(v1|v2)/state_ids/", -diff --git a/docs/upgrade.md b/docs/upgrade.md -index 07a9641fdd..d508e2231e 100644 ---- a/docs/upgrade.md -+++ b/docs/upgrade.md -@@ -117,6 +117,16 @@ each upgrade are complete before moving on to the next upgrade, to avoid - stacking them up. You can monitor the currently running background updates with - [the Admin API](usage/administration/admin_api/background_updates.html#status). - -+# Upgrading to v1.130.0 -+ -+## Documented endpoint which can be delegated to a federation worker -+ -+The endpoint `^/_matrix/federation/v1/version$` can be delegated to a federation -+worker. This is not new behaviour, but had not been documented yet. The -+[list of delegatable endpoints](workers.md#synapseappgeneric_worker) has -+been updated to include it. Make sure to check your reverse proxy rules if you -+are using workers. -+ - # Upgrading to v1.126.0 - - ## Room list publication rules change -diff --git a/docs/workers.md b/docs/workers.md -index 9ebcc886b1..2597e78217 100644 ---- a/docs/workers.md -+++ b/docs/workers.md -@@ -200,6 +200,7 @@ information. - ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ - - # Federation requests -+ ^/_matrix/federation/v1/version$ - ^/_matrix/federation/v1/event/ - ^/_matrix/federation/v1/state/ - ^/_matrix/federation/v1/state_ids/ --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch b/packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch deleted file mode 100644
index c874ee0..0000000
--- a/packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch
+++ /dev/null
@@ -1,383 +0,0 @@ -From 6dc1ecd35972c95ce62c5e0563245845c9c64e49 Mon Sep 17 00:00:00 2001 -From: Shay <hillerys@element.io> -Date: Thu, 1 May 2025 11:30:00 -0700 -Subject: [PATCH 23/74] Add an Admin API endpoint to fetch scheduled tasks - (#18214) - ---- - changelog.d/18214.feature | 1 + - docs/admin_api/scheduled_tasks.md | 54 +++++++ - synapse/rest/admin/__init__.py | 2 + - synapse/rest/admin/scheduled_tasks.py | 70 +++++++++ - tests/rest/admin/test_scheduled_tasks.py | 192 +++++++++++++++++++++++ - 5 files changed, 319 insertions(+) - create mode 100644 changelog.d/18214.feature - create mode 100644 docs/admin_api/scheduled_tasks.md - create mode 100644 synapse/rest/admin/scheduled_tasks.py - create mode 100644 tests/rest/admin/test_scheduled_tasks.py - -diff --git a/changelog.d/18214.feature b/changelog.d/18214.feature -new file mode 100644 -index 0000000000..751cb7d383 ---- /dev/null -+++ b/changelog.d/18214.feature -@@ -0,0 +1 @@ -+Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. -\ No newline at end of file -diff --git a/docs/admin_api/scheduled_tasks.md b/docs/admin_api/scheduled_tasks.md -new file mode 100644 -index 0000000000..1708871a6d ---- /dev/null -+++ b/docs/admin_api/scheduled_tasks.md -@@ -0,0 +1,54 @@ -+# Show scheduled tasks -+ -+This API returns information about scheduled tasks. -+ -+To use it, you will need to authenticate by providing an `access_token` -+for a server admin: see [Admin API](../usage/administration/admin_api/). -+ -+The api is: -+``` -+GET /_synapse/admin/v1/scheduled_tasks -+``` -+ -+It returns a JSON body like the following: -+ -+```json -+{ -+ "scheduled_tasks": [ -+ { -+ "id": "GSA124oegf1", -+ "action": "shutdown_room", -+ "status": "complete", -+ "timestamp": 23423523, -+ "resource_id": "!roomid", -+ "result": "some result", -+ "error": null -+ } -+ ] -+} -+``` -+ -+**Query parameters:** -+ -+* `action_name`: string - Is optional. Returns only the scheduled tasks with the given action name. -+* `resource_id`: string - Is optional. Returns only the scheduled tasks with the given resource id. -+* `status`: string - Is optional. Returns only the scheduled tasks matching the given status, one of -+ - "scheduled" - Task is scheduled but not active -+ - "active" - Task is active and probably running, and if not will be run on next scheduler loop run -+ - "complete" - Task has completed successfully -+ - "failed" - Task is over and either returned a failed status, or had an exception -+ -+* `max_timestamp`: int - Is optional. Returns only the scheduled tasks with a timestamp inferior to the specified one. -+ -+**Response** -+ -+The following fields are returned in the JSON response body along with a `200` HTTP status code: -+ -+* `id`: string - ID of scheduled task. -+* `action`: string - The name of the scheduled task's action. -+* `status`: string - The status of the scheduled task. -+* `timestamp_ms`: integer - The timestamp (in milliseconds since the unix epoch) of the given task - If the status is "scheduled" then this represents when it should be launched. -+ Otherwise it represents the last time this task got a change of state. 
-+* `resource_id`: Optional string - The resource id of the scheduled task, if it possesses one -+* `result`: Optional Json - Any result of the scheduled task, if given -+* `error`: Optional string - If the task has the status "failed", the error associated with this failure -diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py -index 5977ded4a0..cf809d1a27 100644 ---- a/synapse/rest/admin/__init__.py -+++ b/synapse/rest/admin/__init__.py -@@ -86,6 +86,7 @@ from synapse.rest.admin.rooms import ( - RoomStateRestServlet, - RoomTimestampToEventRestServlet, - ) -+from synapse.rest.admin.scheduled_tasks import ScheduledTasksRestServlet - from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet - from synapse.rest.admin.statistics import ( - LargestRoomsStatistics, -@@ -338,6 +339,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - BackgroundUpdateStartJobRestServlet(hs).register(http_server) - ExperimentalFeaturesRestServlet(hs).register(http_server) - SuspendAccountRestServlet(hs).register(http_server) -+ ScheduledTasksRestServlet(hs).register(http_server) - - - def register_servlets_for_client_rest_resource( -diff --git a/synapse/rest/admin/scheduled_tasks.py b/synapse/rest/admin/scheduled_tasks.py -new file mode 100644 -index 0000000000..2ae13021b9 ---- /dev/null -+++ b/synapse/rest/admin/scheduled_tasks.py -@@ -0,0 +1,70 @@ -+# -+# This file is licensed under the Affero General Public License (AGPL) version 3. -+# -+# Copyright (C) 2025 New Vector, Ltd -+# -+# This program is free software: you can redistribute it and/or modify -+# it under the terms of the GNU Affero General Public License as -+# published by the Free Software Foundation, either version 3 of the -+# License, or (at your option) any later version. -+# -+# See the GNU Affero General Public License for more details: -+# <https://www.gnu.org/licenses/agpl-3.0.html>. 
-+# -+# -+# -+from typing import TYPE_CHECKING, Tuple -+ -+from synapse.http.servlet import RestServlet, parse_integer, parse_string -+from synapse.http.site import SynapseRequest -+from synapse.rest.admin import admin_patterns, assert_requester_is_admin -+from synapse.types import JsonDict, TaskStatus -+ -+if TYPE_CHECKING: -+ from synapse.server import HomeServer -+ -+ -+class ScheduledTasksRestServlet(RestServlet): -+ """Get a list of scheduled tasks and their statuses -+ optionally filtered by action name, resource id, status, and max timestamp -+ """ -+ -+ PATTERNS = admin_patterns("/scheduled_tasks$") -+ -+ def __init__(self, hs: "HomeServer"): -+ self._auth = hs.get_auth() -+ self._store = hs.get_datastores().main -+ -+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: -+ await assert_requester_is_admin(self._auth, request) -+ -+ # extract query params -+ action_name = parse_string(request, "action_name") -+ resource_id = parse_string(request, "resource_id") -+ status = parse_string(request, "job_status") -+ max_timestamp = parse_integer(request, "max_timestamp") -+ -+ actions = [action_name] if action_name else None -+ statuses = [TaskStatus(status)] if status else None -+ -+ tasks = await self._store.get_scheduled_tasks( -+ actions=actions, -+ resource_id=resource_id, -+ statuses=statuses, -+ max_timestamp=max_timestamp, -+ ) -+ -+ json_tasks = [] -+ for task in tasks: -+ result_task = { -+ "id": task.id, -+ "action": task.action, -+ "status": task.status, -+ "timestamp_ms": task.timestamp, -+ "resource_id": task.resource_id, -+ "result": task.result, -+ "error": task.error, -+ } -+ json_tasks.append(result_task) -+ -+ return 200, {"scheduled_tasks": json_tasks} -diff --git a/tests/rest/admin/test_scheduled_tasks.py b/tests/rest/admin/test_scheduled_tasks.py -new file mode 100644 -index 0000000000..9654e9322b ---- /dev/null -+++ b/tests/rest/admin/test_scheduled_tasks.py -@@ -0,0 +1,192 @@ -+# -+# This file is licensed under the Affero General Public License (AGPL) version 3. -+# -+# Copyright (C) 2025 New Vector, Ltd -+# -+# This program is free software: you can redistribute it and/or modify -+# it under the terms of the GNU Affero General Public License as -+# published by the Free Software Foundation, either version 3 of the -+# License, or (at your option) any later version. -+# -+# See the GNU Affero General Public License for more details: -+# <https://www.gnu.org/licenses/agpl-3.0.html>. 
-+# -+# -+# -+from typing import Mapping, Optional, Tuple -+ -+from twisted.test.proto_helpers import MemoryReactor -+ -+import synapse.rest.admin -+from synapse.api.errors import Codes -+from synapse.rest.client import login -+from synapse.server import HomeServer -+from synapse.types import JsonMapping, ScheduledTask, TaskStatus -+from synapse.util import Clock -+ -+from tests import unittest -+ -+ -+class ScheduledTasksAdminApiTestCase(unittest.HomeserverTestCase): -+ servlets = [ -+ synapse.rest.admin.register_servlets, -+ login.register_servlets, -+ ] -+ -+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: -+ self.store = hs.get_datastores().main -+ self.admin_user = self.register_user("admin", "pass", admin=True) -+ self.admin_user_tok = self.login("admin", "pass") -+ self._task_scheduler = hs.get_task_scheduler() -+ -+ # create and schedule a few tasks -+ async def _test_task( -+ task: ScheduledTask, -+ ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: -+ return TaskStatus.ACTIVE, None, None -+ -+ async def _finished_test_task( -+ task: ScheduledTask, -+ ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: -+ return TaskStatus.COMPLETE, None, None -+ -+ async def _failed_test_task( -+ task: ScheduledTask, -+ ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: -+ return TaskStatus.FAILED, None, "Everything failed" -+ -+ self._task_scheduler.register_action(_test_task, "test_task") -+ self.get_success( -+ self._task_scheduler.schedule_task("test_task", resource_id="test") -+ ) -+ -+ self._task_scheduler.register_action(_finished_test_task, "finished_test_task") -+ self.get_success( -+ self._task_scheduler.schedule_task( -+ "finished_test_task", resource_id="finished_task" -+ ) -+ ) -+ -+ self._task_scheduler.register_action(_failed_test_task, "failed_test_task") -+ self.get_success( -+ self._task_scheduler.schedule_task( -+ "failed_test_task", resource_id="failed_task" -+ ) -+ ) -+ -+ def check_scheduled_tasks_response(self, scheduled_tasks: Mapping) -> list: -+ result = [] -+ for task in scheduled_tasks: -+ if task["resource_id"] == "test": -+ self.assertEqual(task["status"], TaskStatus.ACTIVE) -+ self.assertEqual(task["action"], "test_task") -+ result.append(task) -+ if task["resource_id"] == "finished_task": -+ self.assertEqual(task["status"], TaskStatus.COMPLETE) -+ self.assertEqual(task["action"], "finished_test_task") -+ result.append(task) -+ if task["resource_id"] == "failed_task": -+ self.assertEqual(task["status"], TaskStatus.FAILED) -+ self.assertEqual(task["action"], "failed_test_task") -+ result.append(task) -+ -+ return result -+ -+ def test_requester_is_not_admin(self) -> None: -+ """ -+ If the user is not a server admin, an error 403 is returned. -+ """ -+ -+ self.register_user("user", "pass", admin=False) -+ other_user_tok = self.login("user", "pass") -+ -+ channel = self.make_request( -+ "GET", -+ "/_synapse/admin/v1/scheduled_tasks", -+ content={}, -+ access_token=other_user_tok, -+ ) -+ -+ self.assertEqual(403, channel.code, msg=channel.json_body) -+ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) -+ -+ def test_scheduled_tasks(self) -> None: -+ """ -+ Test that endpoint returns scheduled tasks. 
-+ """ -+ -+ channel = self.make_request( -+ "GET", -+ "/_synapse/admin/v1/scheduled_tasks", -+ content={}, -+ access_token=self.admin_user_tok, -+ ) -+ self.assertEqual(200, channel.code, msg=channel.json_body) -+ scheduled_tasks = channel.json_body["scheduled_tasks"] -+ -+ # make sure we got back all the scheduled tasks -+ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) -+ self.assertEqual(len(found_tasks), 3) -+ -+ def test_filtering_scheduled_tasks(self) -> None: -+ """ -+ Test that filtering the scheduled tasks response via query params works as expected. -+ """ -+ # filter via job_status -+ channel = self.make_request( -+ "GET", -+ "/_synapse/admin/v1/scheduled_tasks?job_status=active", -+ content={}, -+ access_token=self.admin_user_tok, -+ ) -+ self.assertEqual(200, channel.code, msg=channel.json_body) -+ scheduled_tasks = channel.json_body["scheduled_tasks"] -+ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) -+ -+ # only the active task should have been returned -+ self.assertEqual(len(found_tasks), 1) -+ self.assertEqual(found_tasks[0]["status"], "active") -+ -+ # filter via action_name -+ channel = self.make_request( -+ "GET", -+ "/_synapse/admin/v1/scheduled_tasks?action_name=test_task", -+ content={}, -+ access_token=self.admin_user_tok, -+ ) -+ self.assertEqual(200, channel.code, msg=channel.json_body) -+ scheduled_tasks = channel.json_body["scheduled_tasks"] -+ -+ # only test_task should have been returned -+ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) -+ self.assertEqual(len(found_tasks), 1) -+ self.assertEqual(found_tasks[0]["action"], "test_task") -+ -+ # filter via max_timestamp -+ channel = self.make_request( -+ "GET", -+ "/_synapse/admin/v1/scheduled_tasks?max_timestamp=0", -+ content={}, -+ access_token=self.admin_user_tok, -+ ) -+ self.assertEqual(200, channel.code, msg=channel.json_body) -+ scheduled_tasks = channel.json_body["scheduled_tasks"] -+ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) -+ -+ # none should have been returned -+ self.assertEqual(len(found_tasks), 0) -+ -+ # filter via resource id -+ channel = self.make_request( -+ "GET", -+ "/_synapse/admin/v1/scheduled_tasks?resource_id=failed_task", -+ content={}, -+ access_token=self.admin_user_tok, -+ ) -+ self.assertEqual(200, channel.code, msg=channel.json_body) -+ scheduled_tasks = channel.json_body["scheduled_tasks"] -+ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) -+ -+ # only the task with the matching resource id should have been returned -+ self.assertEqual(len(found_tasks), 1) -+ self.assertEqual(found_tasks[0]["resource_id"], "failed_task") --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0071-Include-room-ID-in-room-deletion-status-response-183.patch b/packages/overlays/matrix-synapse/patches/0023-Include-room-ID-in-room-deletion-status-response-183.patch
index 4ce28c4..f14e7ed 100644 --- a/packages/overlays/matrix-synapse/patches/0071-Include-room-ID-in-room-deletion-status-response-183.patch +++ b/packages/overlays/matrix-synapse/patches/0023-Include-room-ID-in-room-deletion-status-response-183.patch
@@ -1,7 +1,7 @@ From 553e124f766584456fbdb6d1aa37fdd12ad54dad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= <ilmari@ilmari.org> Date: Tue, 20 May 2025 17:53:30 +0100 -Subject: [PATCH 71/74] Include room ID in room deletion status response +Subject: [PATCH 23/34] Include room ID in room deletion status response (#18318) When querying by `delete_id` it's handy to see which room the delete diff --git a/packages/overlays/matrix-synapse/patches/0024-Policy-server-part-1-Actually-call-the-policy-server.patch b/packages/overlays/matrix-synapse/patches/0024-Policy-server-part-1-Actually-call-the-policy-server.patch new file mode 100644
index 0000000..528c970 --- /dev/null +++ b/packages/overlays/matrix-synapse/patches/0024-Policy-server-part-1-Actually-call-the-policy-server.patch
@@ -0,0 +1,666 @@ +From b7d48419476f70e54dc24ecd986562ba22be52ec Mon Sep 17 00:00:00 2001 +From: Travis Ralston <travisr@element.io> +Date: Wed, 21 May 2025 16:09:09 -0600 +Subject: [PATCH 24/34] Policy server part 1: Actually call the policy server + (#18387) + +Roughly reviewable commit-by-commit. + +This is the first part of adding policy server support to Synapse. Other +parts (unordered), which may or may not be bundled into fewer PRs, +include: + +* Implementation of a bulk API +* Supporting a moderation server config (the `fallback_*` options of +https://github.com/element-hq/policyserv_spam_checker ) +* Adding an "early event hook" for appservices to receive federation +transactions *before* events are processed formally +* Performance and stability improvements + +### Pull Request Checklist + +<!-- Please read +https://element-hq.github.io/synapse/latest/development/contributing_guide.html +before submitting your pull request --> + +* [x] Pull request is based on the develop branch +* [x] Pull request includes a [changelog +file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). +The entry should: +- Be a short description of your change which makes sense to users. +"Fixed a bug that prevented receiving messages from other servers." +instead of "Moved X method from `EventStore` to `EventWorkerStore`.". + - Use markdown where necessary, mostly for `code blocks`. + - End with either a period (.) or an exclamation mark (!). + - Start with a capital letter. +- Feel free to credit yourself, by adding a sentence "Contributed by +@github_username." or "Contributed by [Your Name]." to the end of the +entry. +* [x] [Code +style](https://element-hq.github.io/synapse/latest/code_style.html) is +correct +(run the +[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) + +--------- + +Co-authored-by: turt2live <1190097+turt2live@users.noreply.github.com> +Co-authored-by: Devon Hudson <devon.dmytro@gmail.com> +--- + changelog.d/18387.feature | 1 + + synapse/federation/federation_base.py | 34 ++++ + synapse/federation/federation_client.py | 57 ++++++ + synapse/federation/transport/client.py | 27 +++ + synapse/handlers/message.py | 15 +- + synapse/handlers/room_policy.py | 89 ++++++++++ + synapse/server.py | 5 + + synapse/types/handlers/policy_server.py | 16 ++ + tests/handlers/test_room_policy.py | 226 ++++++++++++++++++++++++ + 9 files changed, 469 insertions(+), 1 deletion(-) + create mode 100644 changelog.d/18387.feature + create mode 100644 synapse/handlers/room_policy.py + create mode 100644 synapse/types/handlers/policy_server.py + create mode 100644 tests/handlers/test_room_policy.py + +diff --git a/changelog.d/18387.feature b/changelog.d/18387.feature +new file mode 100644 +index 0000000000..2d9ff2cea2 +--- /dev/null ++++ b/changelog.d/18387.feature +@@ -0,0 +1 @@ ++Add support for calling Policy Servers ([MSC4284](https://github.com/matrix-org/matrix-spec-proposals/pull/4284)) to mark events as spam. 
+\ No newline at end of file +diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py +index 3796bff5e7..45593430e8 100644 +--- a/synapse/federation/federation_base.py ++++ b/synapse/federation/federation_base.py +@@ -30,6 +30,7 @@ from synapse.crypto.keyring import Keyring + from synapse.events import EventBase, make_event_from_dict + from synapse.events.utils import prune_event, validate_canonicaljson + from synapse.federation.units import filter_pdus_for_valid_depth ++from synapse.handlers.room_policy import RoomPolicyHandler + from synapse.http.servlet import assert_params_in_dict + from synapse.logging.opentracing import log_kv, trace + from synapse.types import JsonDict, get_domain_from_id +@@ -64,6 +65,24 @@ class FederationBase: + self._clock = hs.get_clock() + self._storage_controllers = hs.get_storage_controllers() + ++ # We need to define this lazily otherwise we get a cyclic dependency. ++ # self._policy_handler = hs.get_room_policy_handler() ++ self._policy_handler: Optional[RoomPolicyHandler] = None ++ ++ def _lazily_get_policy_handler(self) -> RoomPolicyHandler: ++ """Lazily get the room policy handler. ++ ++ This is required to avoid an import cycle: RoomPolicyHandler requires a ++ FederationClient, which requires a FederationBase, which requires a ++ RoomPolicyHandler. ++ ++ Returns: ++ RoomPolicyHandler: The room policy handler. ++ """ ++ if self._policy_handler is None: ++ self._policy_handler = self.hs.get_room_policy_handler() ++ return self._policy_handler ++ + @trace + async def _check_sigs_and_hash( + self, +@@ -80,6 +99,10 @@ class FederationBase: + Also runs the event through the spam checker; if it fails, redacts the event + and flags it as soft-failed. + ++ Also checks that the event is allowed by the policy server, if the room uses ++ a policy server. If the event is not allowed, the event is flagged as ++ soft-failed but not redacted. ++ + Args: + room_version: The room version of the PDU + pdu: the event to be checked +@@ -145,6 +168,17 @@ class FederationBase: + ) + return redacted_event + ++ policy_allowed = await self._lazily_get_policy_handler().is_event_allowed(pdu) ++ if not policy_allowed: ++ logger.warning( ++ "Event not allowed by policy server, soft-failing %s", pdu.event_id ++ ) ++ pdu.internal_metadata.soft_failed = True ++ # Note: we don't redact the event so admins can inspect the event after the ++ # fact. Other processes may redact the event, but that won't be applied to ++ # the database copy of the event until the server's config requires it. 
++ return pdu ++ + spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu) + + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: +diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py +index 9fc5b70e9a..7c485aa7e0 100644 +--- a/synapse/federation/federation_client.py ++++ b/synapse/federation/federation_client.py +@@ -75,6 +75,7 @@ from synapse.http.client import is_unknown_endpoint + from synapse.http.types import QueryParams + from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace + from synapse.types import JsonDict, StrCollection, UserID, get_domain_from_id ++from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM + from synapse.util.async_helpers import concurrently_execute + from synapse.util.caches.expiringcache import ExpiringCache + from synapse.util.retryutils import NotRetryingDestination +@@ -421,6 +422,62 @@ class FederationClient(FederationBase): + + return None + ++ @trace ++ @tag_args ++ async def get_pdu_policy_recommendation( ++ self, destination: str, pdu: EventBase, timeout: Optional[int] = None ++ ) -> str: ++ """Requests that the destination server (typically a policy server) ++ check the event and return its recommendation on how to handle the ++ event. ++ ++ If the policy server could not be contacted or the policy server ++ returned an unknown recommendation, this returns an OK recommendation. ++ This type fixing behaviour is done because the typical caller will be ++ in a critical call path and would generally interpret a `None` or similar ++ response as "weird value; don't care; move on without taking action". We ++ just frontload that logic here. ++ ++ ++ Args: ++ destination: The remote homeserver to ask (a policy server) ++ pdu: The event to check ++ timeout: How long to try (in ms) the destination for before ++ giving up. None indicates no timeout. ++ ++ Returns: ++ The policy recommendation, or RECOMMENDATION_OK if the policy server was ++ uncontactable or returned an unknown recommendation. 
++ """ ++ ++ logger.debug( ++ "get_pdu_policy_recommendation for event_id=%s from %s", ++ pdu.event_id, ++ destination, ++ ) ++ ++ try: ++ res = await self.transport_layer.get_policy_recommendation_for_pdu( ++ destination, pdu, timeout=timeout ++ ) ++ recommendation = res.get("recommendation") ++ if not isinstance(recommendation, str): ++ raise InvalidResponseError("recommendation is not a string") ++ if recommendation not in (RECOMMENDATION_OK, RECOMMENDATION_SPAM): ++ logger.warning( ++ "get_pdu_policy_recommendation: unknown recommendation: %s", ++ recommendation, ++ ) ++ return RECOMMENDATION_OK ++ return recommendation ++ except Exception as e: ++ logger.warning( ++ "get_pdu_policy_recommendation: server %s responded with error, assuming OK recommendation: %s", ++ destination, ++ e, ++ ) ++ return RECOMMENDATION_OK ++ + @trace + @tag_args + async def get_pdu( +diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py +index 206e91ed14..62bf96ce91 100644 +--- a/synapse/federation/transport/client.py ++++ b/synapse/federation/transport/client.py +@@ -143,6 +143,33 @@ class TransportLayerClient: + destination, path=path, timeout=timeout, try_trailing_slash_on_400=True + ) + ++ async def get_policy_recommendation_for_pdu( ++ self, destination: str, event: EventBase, timeout: Optional[int] = None ++ ) -> JsonDict: ++ """Requests the policy recommendation for the given pdu from the given policy server. ++ ++ Args: ++ destination: The host name of the remote homeserver checking the event. ++ event: The event to check. ++ timeout: How long to try (in ms) the destination for before giving up. ++ None indicates no timeout. ++ ++ Returns: ++ The full recommendation object from the remote server. ++ """ ++ logger.debug( ++ "get_policy_recommendation_for_pdu dest=%s, event_id=%s", ++ destination, ++ event.event_id, ++ ) ++ return await self.client.post_json( ++ destination=destination, ++ path=f"/_matrix/policy/unstable/org.matrix.msc4284/event/{event.event_id}/check", ++ data=event.get_pdu_json(), ++ ignore_backoff=True, ++ timeout=timeout, ++ ) ++ + async def backfill( + self, destination: str, room_id: str, event_tuples: Collection[str], limit: int + ) -> Optional[Union[JsonDict, list]]: +diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py +index ff6eb5a514..cb6de02309 100644 +--- a/synapse/handlers/message.py ++++ b/synapse/handlers/message.py +@@ -495,6 +495,7 @@ class EventCreationHandler: + self._instance_name = hs.get_instance_name() + self._notifier = hs.get_notifier() + self._worker_lock_handler = hs.get_worker_locks_handler() ++ self._policy_handler = hs.get_room_policy_handler() + + self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state + +@@ -1108,6 +1109,18 @@ class EventCreationHandler: + event.sender, + ) + ++ policy_allowed = await self._policy_handler.is_event_allowed(event) ++ if not policy_allowed: ++ logger.warning( ++ "Event not allowed by policy server, rejecting %s", ++ event.event_id, ++ ) ++ raise SynapseError( ++ 403, ++ "This message has been rejected as probable spam", ++ Codes.FORBIDDEN, ++ ) ++ + spam_check_result = ( + await self._spam_checker_module_callbacks.check_event_for_spam( + event +@@ -1119,7 +1132,7 @@ class EventCreationHandler: + [code, dict] = spam_check_result + raise SynapseError( + 403, +- "This message had been rejected as probable spam", ++ "This message has been rejected as probable spam", + code, + dict, + ) +diff --git a/synapse/handlers/room_policy.py 
b/synapse/handlers/room_policy.py +new file mode 100644 +index 0000000000..dcfebb128c +--- /dev/null ++++ b/synapse/handlers/room_policy.py +@@ -0,0 +1,89 @@ ++# ++# This file is licensed under the Affero General Public License (AGPL) version 3. ++# ++# Copyright 2016-2021 The Matrix.org Foundation C.I.C. ++# Copyright (C) 2023 New Vector, Ltd ++# ++# This program is free software: you can redistribute it and/or modify ++# it under the terms of the GNU Affero General Public License as ++# published by the Free Software Foundation, either version 3 of the ++# License, or (at your option) any later version. ++# ++# See the GNU Affero General Public License for more details: ++# <https://www.gnu.org/licenses/agpl-3.0.html>. ++# ++# ++ ++import logging ++from typing import TYPE_CHECKING ++ ++from synapse.events import EventBase ++from synapse.types.handlers.policy_server import RECOMMENDATION_OK ++from synapse.util.stringutils import parse_and_validate_server_name ++ ++if TYPE_CHECKING: ++ from synapse.server import HomeServer ++ ++logger = logging.getLogger(__name__) ++ ++ ++class RoomPolicyHandler: ++ def __init__(self, hs: "HomeServer"): ++ self._hs = hs ++ self._store = hs.get_datastores().main ++ self._storage_controllers = hs.get_storage_controllers() ++ self._event_auth_handler = hs.get_event_auth_handler() ++ self._federation_client = hs.get_federation_client() ++ ++ async def is_event_allowed(self, event: EventBase) -> bool: ++ """Check if the given event is allowed in the room by the policy server. ++ ++ Note: This will *always* return True if the room's policy server is Synapse ++ itself. This is because Synapse can't be a policy server (currently). ++ ++ If no policy server is configured in the room, this returns True. Similarly, if ++ the policy server is invalid in any way (not joined, not a server, etc), this ++ returns True. ++ ++ If a valid and contactable policy server is configured in the room, this returns ++ True if that server suggests the event is not spammy, and False otherwise. ++ ++ Args: ++ event: The event to check. This should be a fully-formed PDU. ++ ++ Returns: ++ bool: True if the event is allowed in the room, False otherwise. ++ """ ++ policy_event = await self._storage_controllers.state.get_current_state_event( ++ event.room_id, "org.matrix.msc4284.policy", "" ++ ) ++ if not policy_event: ++ return True # no policy server == default allow ++ ++ policy_server = policy_event.content.get("via", "") ++ if policy_server is None or not isinstance(policy_server, str): ++ return True # no policy server == default allow ++ ++ if policy_server == self._hs.hostname: ++ return True # Synapse itself can't be a policy server (currently) ++ ++ try: ++ parse_and_validate_server_name(policy_server) ++ except ValueError: ++ return True # invalid policy server == default allow ++ ++ is_in_room = await self._event_auth_handler.is_host_in_room( ++ event.room_id, policy_server ++ ) ++ if not is_in_room: ++ return True # policy server not in room == default allow ++ ++ # At this point, the server appears valid and is in the room, so ask it to check ++ # the event. 
++ recommendation = await self._federation_client.get_pdu_policy_recommendation( ++ policy_server, event ++ ) ++ if recommendation != RECOMMENDATION_OK: ++ return False ++ ++ return True # default allow +diff --git a/synapse/server.py b/synapse/server.py +index bd2faa61b9..2add4d4e6e 100644 +--- a/synapse/server.py ++++ b/synapse/server.py +@@ -107,6 +107,7 @@ from synapse.handlers.room_member import ( + RoomMemberMasterHandler, + ) + from synapse.handlers.room_member_worker import RoomMemberWorkerHandler ++from synapse.handlers.room_policy import RoomPolicyHandler + from synapse.handlers.room_summary import RoomSummaryHandler + from synapse.handlers.search import SearchHandler + from synapse.handlers.send_email import SendEmailHandler +@@ -807,6 +808,10 @@ class HomeServer(metaclass=abc.ABCMeta): + + return OidcHandler(self) + ++ @cache_in_self ++ def get_room_policy_handler(self) -> RoomPolicyHandler: ++ return RoomPolicyHandler(self) ++ + @cache_in_self + def get_event_client_serializer(self) -> EventClientSerializer: + return EventClientSerializer(self) +diff --git a/synapse/types/handlers/policy_server.py b/synapse/types/handlers/policy_server.py +new file mode 100644 +index 0000000000..bfc09dabf4 +--- /dev/null ++++ b/synapse/types/handlers/policy_server.py +@@ -0,0 +1,16 @@ ++# ++# This file is licensed under the Affero General Public License (AGPL) version 3. ++# ++# Copyright (C) 2025 New Vector, Ltd ++# ++# This program is free software: you can redistribute it and/or modify ++# it under the terms of the GNU Affero General Public License as ++# published by the Free Software Foundation, either version 3 of the ++# License, or (at your option) any later version. ++# ++# See the GNU Affero General Public License for more details: ++# <https://www.gnu.org/licenses/agpl-3.0.html>. ++# ++ ++RECOMMENDATION_OK = "ok" ++RECOMMENDATION_SPAM = "spam" +diff --git a/tests/handlers/test_room_policy.py b/tests/handlers/test_room_policy.py +new file mode 100644 +index 0000000000..26642c18ea +--- /dev/null ++++ b/tests/handlers/test_room_policy.py +@@ -0,0 +1,226 @@ ++# ++# This file is licensed under the Affero General Public License (AGPL) version 3. ++# ++# Copyright (C) 2025 New Vector, Ltd ++# ++# This program is free software: you can redistribute it and/or modify ++# it under the terms of the GNU Affero General Public License as ++# published by the Free Software Foundation, either version 3 of the ++# License, or (at your option) any later version. ++# ++# See the GNU Affero General Public License for more details: ++# <https://www.gnu.org/licenses/agpl-3.0.html>. 
++# ++# ++from typing import Optional ++from unittest import mock ++ ++from twisted.test.proto_helpers import MemoryReactor ++ ++from synapse.events import EventBase, make_event_from_dict ++from synapse.rest import admin ++from synapse.rest.client import login, room ++from synapse.server import HomeServer ++from synapse.types import JsonDict, UserID ++from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM ++from synapse.util import Clock ++ ++from tests import unittest ++from tests.test_utils import event_injection ++ ++ ++class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): ++ """Tests room policy handler.""" ++ ++ servlets = [ ++ admin.register_servlets, ++ login.register_servlets, ++ room.register_servlets, ++ ] ++ ++ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: ++ # mock out the federation transport client ++ self.mock_federation_transport_client = mock.Mock( ++ spec=["get_policy_recommendation_for_pdu"] ++ ) ++ self.mock_federation_transport_client.get_policy_recommendation_for_pdu = ( ++ mock.AsyncMock() ++ ) ++ return super().setup_test_homeserver( ++ federation_transport_client=self.mock_federation_transport_client ++ ) ++ ++ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ++ self.hs = hs ++ self.handler = hs.get_room_policy_handler() ++ main_store = self.hs.get_datastores().main ++ ++ # Create a room ++ self.creator = self.register_user("creator", "test1234") ++ self.creator_token = self.login("creator", "test1234") ++ self.room_id = self.helper.create_room_as( ++ room_creator=self.creator, tok=self.creator_token ++ ) ++ room_version = self.get_success(main_store.get_room_version(self.room_id)) ++ ++ # Create some sample events ++ self.spammy_event = make_event_from_dict( ++ room_version=room_version, ++ internal_metadata_dict={}, ++ event_dict={ ++ "room_id": self.room_id, ++ "type": "m.room.message", ++ "sender": "@spammy:example.org", ++ "content": { ++ "msgtype": "m.text", ++ "body": "This is a spammy event.", ++ }, ++ }, ++ ) ++ self.not_spammy_event = make_event_from_dict( ++ room_version=room_version, ++ internal_metadata_dict={}, ++ event_dict={ ++ "room_id": self.room_id, ++ "type": "m.room.message", ++ "sender": "@not_spammy:example.org", ++ "content": { ++ "msgtype": "m.text", ++ "body": "This is a NOT spammy event.", ++ }, ++ }, ++ ) ++ ++ # Prepare the policy server mock to decide spam vs not spam on those events ++ self.call_count = 0 ++ ++ async def get_policy_recommendation_for_pdu( ++ destination: str, ++ pdu: EventBase, ++ timeout: Optional[int] = None, ++ ) -> JsonDict: ++ self.call_count += 1 ++ self.assertEqual(destination, self.OTHER_SERVER_NAME) ++ if pdu.event_id == self.spammy_event.event_id: ++ return {"recommendation": RECOMMENDATION_SPAM} ++ elif pdu.event_id == self.not_spammy_event.event_id: ++ return {"recommendation": RECOMMENDATION_OK} ++ else: ++ self.fail("Unexpected event ID") ++ ++ self.mock_federation_transport_client.get_policy_recommendation_for_pdu.side_effect = get_policy_recommendation_for_pdu ++ ++ def _add_policy_server_to_room(self) -> None: ++ # Inject a member event into the room ++ policy_user_id = f"@policy:{self.OTHER_SERVER_NAME}" ++ self.get_success( ++ event_injection.inject_member_event( ++ self.hs, self.room_id, policy_user_id, "join" ++ ) ++ ) ++ self.helper.send_state( ++ self.room_id, ++ "org.matrix.msc4284.policy", ++ { ++ "via": self.OTHER_SERVER_NAME, ++ }, ++ tok=self.creator_token, ++ state_key="", ++ ) ++ 
++ def test_no_policy_event_set(self) -> None: ++ # We don't need to modify the room state at all - we're testing the default ++ # case where a room doesn't use a policy server. ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 0) ++ ++ def test_empty_policy_event_set(self) -> None: ++ self.helper.send_state( ++ self.room_id, ++ "org.matrix.msc4284.policy", ++ { ++ # empty content (no `via`) ++ }, ++ tok=self.creator_token, ++ state_key="", ++ ) ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 0) ++ ++ def test_nonstring_policy_event_set(self) -> None: ++ self.helper.send_state( ++ self.room_id, ++ "org.matrix.msc4284.policy", ++ { ++ "via": 42, # should be a server name ++ }, ++ tok=self.creator_token, ++ state_key="", ++ ) ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 0) ++ ++ def test_self_policy_event_set(self) -> None: ++ self.helper.send_state( ++ self.room_id, ++ "org.matrix.msc4284.policy", ++ { ++ # We ignore events when the policy server is ourselves (for now?) ++ "via": (UserID.from_string(self.creator)).domain, ++ }, ++ tok=self.creator_token, ++ state_key="", ++ ) ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 0) ++ ++ def test_invalid_server_policy_event_set(self) -> None: ++ self.helper.send_state( ++ self.room_id, ++ "org.matrix.msc4284.policy", ++ { ++ "via": "|this| is *not* a (valid) server name.com", ++ }, ++ tok=self.creator_token, ++ state_key="", ++ ) ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 0) ++ ++ def test_not_in_room_policy_event_set(self) -> None: ++ self.helper.send_state( ++ self.room_id, ++ "org.matrix.msc4284.policy", ++ { ++ "via": f"x.{self.OTHER_SERVER_NAME}", ++ }, ++ tok=self.creator_token, ++ state_key="", ++ ) ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 0) ++ ++ def test_spammy_event_is_spam(self) -> None: ++ self._add_policy_server_to_room() ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) ++ self.assertEqual(ok, False) ++ self.assertEqual(self.call_count, 1) ++ ++ def test_not_spammy_event_is_not_spam(self) -> None: ++ self._add_policy_server_to_room() ++ ++ ok = self.get_success(self.handler.is_event_allowed(self.not_spammy_event)) ++ self.assertEqual(ok, True) ++ self.assertEqual(self.call_count, 1) +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch b/packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch deleted file mode 100644
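The policy-server patch above is the part worth pausing on: a room opts in via an `org.matrix.msc4284.policy` state event whose `via` field names the policy server, Synapse then POSTs each candidate PDU to `/_matrix/policy/unstable/org.matrix.msc4284/event/{event_id}/check` on that server, and anything other than an "ok" recommendation gets a local send rejected with a 403 or a federated event soft-failed. A rough standalone sketch of that check, assuming the `requests` library and a made-up server name, and omitting the federation request signing the real transport layer performs:

    # Hedged illustration of the MSC4284 check; not Synapse code.
    import requests

    RECOMMENDATION_OK = "ok"      # mirrors synapse/types/handlers/policy_server.py
    RECOMMENDATION_SPAM = "spam"

    def check_event(policy_server: str, pdu: dict) -> str:
        """Fail open: only a clear "spam" answer blocks the event."""
        try:
            resp = requests.post(
                f"https://{policy_server}/_matrix/policy/unstable/org.matrix.msc4284"
                f"/event/{pdu['event_id']}/check",
                json=pdu,
                timeout=10,
            )
            recommendation = resp.json().get("recommendation")
        except Exception:
            return RECOMMENDATION_OK      # uncontactable server == default allow
        if recommendation not in (RECOMMENDATION_OK, RECOMMENDATION_SPAM):
            return RECOMMENDATION_OK      # unknown answer == default allow
        return recommendation

As in the handler added by the patch, everything except an explicit "spam" answer is treated as allow, so a missing, invalid or unreachable policy server never blocks traffic.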
index 735681a..0000000 --- a/packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch +++ /dev/null
@@ -1,48 +0,0 @@ -From f2ca2e31f7c5b1627554f85adead37212736bf5a Mon Sep 17 00:00:00 2001 -From: Andrew Ferrazzutti <andrewf@element.io> -Date: Fri, 2 May 2025 06:11:48 -0400 -Subject: [PATCH 24/74] Readme tweaks (#18218) - ---- - README.rst | 12 +++++++----- - changelog.d/18218.doc | 1 + - 2 files changed, 8 insertions(+), 5 deletions(-) - create mode 100644 changelog.d/18218.doc - -diff --git a/README.rst b/README.rst -index 77f861e788..8974990ed1 100644 ---- a/README.rst -+++ b/README.rst -@@ -253,15 +253,17 @@ Alongside all that, join our developer community on Matrix: - Copyright and Licensing - ======================= - --Copyright 2014-2017 OpenMarket Ltd --Copyright 2017 Vector Creations Ltd --Copyright 2017-2025 New Vector Ltd -+| Copyright 2014-2017 OpenMarket Ltd -+| Copyright 2017 Vector Creations Ltd -+| Copyright 2017-2025 New Vector Ltd -+| - - This software is dual-licensed by New Vector Ltd (Element). It can be used either: -- -+ - (1) for free under the terms of the GNU Affero General Public License (as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version); OR -- -+ - (2) under the terms of a paid-for Element Commercial License agreement between you and Element (the terms of which may vary depending on what you and Element have agreed to). -+ - Unless required by applicable law or agreed to in writing, software distributed under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the specific language governing permissions and limitations under the Licenses. - - -diff --git a/changelog.d/18218.doc b/changelog.d/18218.doc -new file mode 100644 -index 0000000000..f62da6a0b9 ---- /dev/null -+++ b/changelog.d/18218.doc -@@ -0,0 +1 @@ -+Improve formatting of the README file. --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0025-Bump-pyo3-from-0.23.5-to-0.24.2-18460.patch b/packages/overlays/matrix-synapse/patches/0025-Bump-pyo3-from-0.23.5-to-0.24.2-18460.patch new file mode 100644
index 0000000..7d1ee5d --- /dev/null +++ b/packages/overlays/matrix-synapse/patches/0025-Bump-pyo3-from-0.23.5-to-0.24.2-18460.patch
@@ -0,0 +1,166 @@ +From ed6b7ba9c3da3add7a1551069411fa3697b0efc4 Mon Sep 17 00:00:00 2001 +From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> +Date: Wed, 21 May 2025 23:12:01 +0100 +Subject: [PATCH 25/34] Bump pyo3 from 0.23.5 to 0.24.2 (#18460) + +Also bump pythonize from 0.23.0 to 0.24.0, otherwise we couldn't compile +as pythonize 0.23.0 required pyo3 "^0.23.0". + +Addresses +[RUSTSEC-2025-0020](https://rustsec.org/advisories/RUSTSEC-2025-0020), +although Synapse is not affected as we don't make use of +`PyString::from_object`. + +[pyo3 0.24.x](https://github.com/PyO3/pyo3/releases/tag/v0.24.0) include +some performance optimisations apparently, and no breaking changes. + +### Pull Request Checklist + +<!-- Please read +https://element-hq.github.io/synapse/latest/development/contributing_guide.html +before submitting your pull request --> + +* [x] Pull request is based on the develop branch +* [x] Pull request includes a [changelog +file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). +The entry should: +- Be a short description of your change which makes sense to users. +"Fixed a bug that prevented receiving messages from other servers." +instead of "Moved X method from `EventStore` to `EventWorkerStore`.". + - Use markdown where necessary, mostly for `code blocks`. + - End with either a period (.) or an exclamation mark (!). + - Start with a capital letter. +- Feel free to credit yourself, by adding a sentence "Contributed by +@github_username." or "Contributed by [Your Name]." to the end of the +entry. +* [x] [Code +style](https://element-hq.github.io/synapse/latest/code_style.html) is +correct (run the +[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) +--- + Cargo.lock | 28 ++++++++++++++-------------- + changelog.d/18460.misc | 1 + + rust/Cargo.toml | 4 ++-- + 3 files changed, 17 insertions(+), 16 deletions(-) + create mode 100644 changelog.d/18460.misc + +diff --git a/Cargo.lock b/Cargo.lock +index 13156e67b5..980dff6987 100644 +--- a/Cargo.lock ++++ b/Cargo.lock +@@ -277,9 +277,9 @@ dependencies = [ + + [[package]] + name = "pyo3" +-version = "0.23.5" ++version = "0.24.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "7778bffd85cf38175ac1f545509665d0b9b92a198ca7941f131f85f7a4f9a872" ++checksum = "e5203598f366b11a02b13aa20cab591229ff0a89fd121a308a5df751d5fc9219" + dependencies = [ + "anyhow", + "cfg-if", +@@ -296,9 +296,9 @@ dependencies = [ + + [[package]] + name = "pyo3-build-config" +-version = "0.23.5" ++version = "0.24.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "94f6cbe86ef3bf18998d9df6e0f3fc1050a8c5efa409bf712e661a4366e010fb" ++checksum = "99636d423fa2ca130fa5acde3059308006d46f98caac629418e53f7ebb1e9999" + dependencies = [ + "once_cell", + "target-lexicon", +@@ -306,9 +306,9 @@ dependencies = [ + + [[package]] + name = "pyo3-ffi" +-version = "0.23.5" ++version = "0.24.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e9f1b4c431c0bb1c8fb0a338709859eed0d030ff6daa34368d3b152a63dfdd8d" ++checksum = "78f9cf92ba9c409279bc3305b5409d90db2d2c22392d443a87df3a1adad59e33" + dependencies = [ + "libc", + "pyo3-build-config", +@@ -327,9 +327,9 @@ dependencies = [ + + [[package]] + name = "pyo3-macros" +-version = "0.23.5" ++version = "0.24.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = 
"fbc2201328f63c4710f68abdf653c89d8dbc2858b88c5d88b0ff38a75288a9da" ++checksum = "0b999cb1a6ce21f9a6b147dcf1be9ffedf02e0043aec74dc390f3007047cecd9" + dependencies = [ + "proc-macro2", + "pyo3-macros-backend", +@@ -339,9 +339,9 @@ dependencies = [ + + [[package]] + name = "pyo3-macros-backend" +-version = "0.23.5" ++version = "0.24.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "fca6726ad0f3da9c9de093d6f116a93c1a38e417ed73bf138472cf4064f72028" ++checksum = "822ece1c7e1012745607d5cf0bcb2874769f0f7cb34c4cde03b9358eb9ef911a" + dependencies = [ + "heck", + "proc-macro2", +@@ -352,9 +352,9 @@ dependencies = [ + + [[package]] + name = "pythonize" +-version = "0.23.0" ++version = "0.24.0" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "91a6ee7a084f913f98d70cdc3ebec07e852b735ae3059a1500db2661265da9ff" ++checksum = "d5bcac0d0b71821f0d69e42654f1e15e5c94b85196446c4de9588951a2117e7b" + dependencies = [ + "pyo3", + "serde", +@@ -532,9 +532,9 @@ dependencies = [ + + [[package]] + name = "target-lexicon" +-version = "0.12.14" ++version = "0.13.2" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" ++checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" + + [[package]] + name = "typenum" +diff --git a/changelog.d/18460.misc b/changelog.d/18460.misc +new file mode 100644 +index 0000000000..5aa19683eb +--- /dev/null ++++ b/changelog.d/18460.misc +@@ -0,0 +1 @@ ++Bump pyo3 from 0.23.5 to 0.24.2. +\ No newline at end of file +diff --git a/rust/Cargo.toml b/rust/Cargo.toml +index 651b268f86..840988e74e 100644 +--- a/rust/Cargo.toml ++++ b/rust/Cargo.toml +@@ -30,14 +30,14 @@ http = "1.1.0" + lazy_static = "1.4.0" + log = "0.4.17" + mime = "0.3.17" +-pyo3 = { version = "0.23.5", features = [ ++pyo3 = { version = "0.24.2", features = [ + "macros", + "anyhow", + "abi3", + "abi3-py39", + ] } + pyo3-log = "0.12.0" +-pythonize = "0.23.0" ++pythonize = "0.24.0" + regex = "1.6.0" + sha2 = "0.10.8" + serde = { version = "1.0.144", features = ["derive"] } +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch b/packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch deleted file mode 100644
index 892a43f..0000000 --- a/packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch +++ /dev/null
@@ -1,129 +0,0 @@ -From 74be5cfdbc2208f0b34d9ab75f99994bd8ed217d Mon Sep 17 00:00:00 2001 -From: Quentin Gliech <quenting@element.io> -Date: Fri, 2 May 2025 12:13:26 +0200 -Subject: [PATCH 25/74] Do not auto-provision missing users & devices when - delegating auth to MAS (#18181) - -Since MAS 0.13.0, the provisionning of devices and users is done -synchronously and reliably enough that we don't need to auto-provision -on the Synapse side anymore. - -It's important to remove this behaviour if we want to start caching -token introspection results. ---- - changelog.d/18181.misc | 1 + - synapse/api/auth/msc3861_delegated.py | 39 +++++++------------------ - tests/handlers/test_oauth_delegation.py | 10 +++++++ - 3 files changed, 22 insertions(+), 28 deletions(-) - create mode 100644 changelog.d/18181.misc - -diff --git a/changelog.d/18181.misc b/changelog.d/18181.misc -new file mode 100644 -index 0000000000..d9ba2f1dd1 ---- /dev/null -+++ b/changelog.d/18181.misc -@@ -0,0 +1 @@ -+Stop auto-provisionning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. -diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py -index 9ded3366e3..e500a06afe 100644 ---- a/synapse/api/auth/msc3861_delegated.py -+++ b/synapse/api/auth/msc3861_delegated.py -@@ -39,7 +39,6 @@ from synapse.api.errors import ( - HttpResponseException, - InvalidClientTokenError, - OAuthInsufficientScopeError, -- StoreError, - SynapseError, - UnrecognizedRequestError, - ) -@@ -512,7 +511,7 @@ class MSC3861DelegatedAuth(BaseAuth): - raise InvalidClientTokenError("No scope in token granting user rights") - - # Match via the sub claim -- sub: Optional[str] = introspection_result.get_sub() -+ sub = introspection_result.get_sub() - if sub is None: - raise InvalidClientTokenError( - "Invalid sub claim in the introspection result" -@@ -525,29 +524,20 @@ class MSC3861DelegatedAuth(BaseAuth): - # If we could not find a user via the external_id, it either does not exist, - # or the external_id was never recorded - -- # TODO: claim mapping should be configurable -- username: Optional[str] = introspection_result.get_username() -- if username is None or not isinstance(username, str): -+ username = introspection_result.get_username() -+ if username is None: - raise AuthError( - 500, - "Invalid username claim in the introspection result", - ) - user_id = UserID(username, self._hostname) - -- # First try to find a user from the username claim -+ # Try to find a user from the username claim - user_info = await self.store.get_user_by_id(user_id=user_id.to_string()) - if user_info is None: -- # If the user does not exist, we should create it on the fly -- # TODO: we could use SCIM to provision users ahead of time and listen -- # for SCIM SET events if those ever become standard: -- # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00 -- -- # TODO: claim mapping should be configurable -- # If present, use the name claim as the displayname -- name: Optional[str] = introspection_result.get_name() -- -- await self.store.register_user( -- user_id=user_id.to_string(), create_profile_with_displayname=name -+ raise AuthError( -+ 500, -+ "User not found", - ) - - # And record the sub as external_id -@@ -587,17 +577,10 @@ class MSC3861DelegatedAuth(BaseAuth): - "Invalid device ID in introspection result", - ) - -- # Create the device on the fly if it does not exist -- try: -- await self.store.get_device( -- user_id=user_id.to_string(), device_id=device_id -- ) -- 
except StoreError: -- await self.store.store_device( -- user_id=user_id.to_string(), -- device_id=device_id, -- initial_device_display_name="OIDC-native client", -- ) -+ # Make sure the device exists -+ await self.store.get_device( -+ user_id=user_id.to_string(), device_id=device_id -+ ) - - # TODO: there is a few things missing in the requester here, which still need - # to be figured out, like: -diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py -index 034a1594d9..934bfee0bc 100644 ---- a/tests/handlers/test_oauth_delegation.py -+++ b/tests/handlers/test_oauth_delegation.py -@@ -147,6 +147,16 @@ class MSC3861OAuthDelegation(HomeserverTestCase): - - return hs - -+ def prepare( -+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer -+ ) -> None: -+ # Provision the user and the device we use in the tests. -+ store = homeserver.get_datastores().main -+ self.get_success(store.register_user(USER_ID)) -+ self.get_success( -+ store.store_device(USER_ID, DEVICE, initial_device_display_name=None) -+ ) -+ - def _assertParams(self) -> None: - """Assert that the request parameters are correct.""" - params = parse_qs(self.http_client.request.call_args[1]["data"].decode("utf-8")) --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0026-Bump-Tornado-from-6.4.2-to-6.5.0-18459.patch b/packages/overlays/matrix-synapse/patches/0026-Bump-Tornado-from-6.4.2-to-6.5.0-18459.patch new file mode 100644
index 0000000..f4f4784 --- /dev/null +++ b/packages/overlays/matrix-synapse/patches/0026-Bump-Tornado-from-6.4.2-to-6.5.0-18459.patch
@@ -0,0 +1,93 @@ +From 162407319103a9f553225a925017cf3f951a4644 Mon Sep 17 00:00:00 2001 +From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> +Date: Wed, 21 May 2025 23:24:12 +0100 +Subject: [PATCH 26/34] Bump Tornado from 6.4.2 to 6.5.0 (#18459) + +Bumps tornado 6.5.0 to mitigate +[CVE-2025-47287](https://nvd.nist.gov/vuln/detail/CVE-2025-47287). + +This dependency is only used indirectly through our sentry dependency. + +### Pull Request Checklist + +<!-- Please read +https://element-hq.github.io/synapse/latest/development/contributing_guide.html +before submitting your pull request --> + +* [x] Pull request is based on the develop branch +* [x] Pull request includes a [changelog +file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). +The entry should: +- Be a short description of your change which makes sense to users. +"Fixed a bug that prevented receiving messages from other servers." +instead of "Moved X method from `EventStore` to `EventWorkerStore`.". + - Use markdown where necessary, mostly for `code blocks`. + - End with either a period (.) or an exclamation mark (!). + - Start with a capital letter. +- Feel free to credit yourself, by adding a sentence "Contributed by +@github_username." or "Contributed by [Your Name]." to the end of the +entry. +* [ ] [Code +style](https://element-hq.github.io/synapse/latest/code_style.html) is +correct (run the +[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) +--- + changelog.d/18459.misc | 1 + + poetry.lock | 27 ++++++++++++++------------- + 2 files changed, 15 insertions(+), 13 deletions(-) + create mode 100644 changelog.d/18459.misc + +diff --git a/changelog.d/18459.misc b/changelog.d/18459.misc +new file mode 100644 +index 0000000000..e148825696 +--- /dev/null ++++ b/changelog.d/18459.misc +@@ -0,0 +1 @@ ++Bump tornado from 6.4.2 to 6.5.0. +\ No newline at end of file +diff --git a/poetry.lock b/poetry.lock +index ada0646215..9938e46780 100644 +--- a/poetry.lock ++++ b/poetry.lock +@@ -2767,24 +2767,25 @@ files = [ + + [[package]] + name = "tornado" +-version = "6.4.2" ++version = "6.5" + description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+ optional = true +-python-versions = ">=3.8" ++python-versions = ">=3.9" + groups = ["main"] + markers = "extra == \"all\" or extra == \"opentracing\"" + files = [ +- {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"}, +- {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"}, +- {file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"}, +- {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"}, +- {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"}, +- {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"}, +- {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"}, +- {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"}, +- {file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"}, +- {file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"}, +- {file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"}, ++ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, ++ {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, ++ {file = "tornado-6.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c625b9d03f1fb4d64149c47d0135227f0434ebb803e2008040eb92906b0105a"}, ++ {file = "tornado-6.5-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a0d8d2309faf015903080fb5bdd969ecf9aa5ff893290845cf3fd5b2dd101bc"}, ++ {file = "tornado-6.5-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03576ab51e9b1677e4cdaae620d6700d9823568b7939277e4690fe4085886c55"}, ++ {file = "tornado-6.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab75fe43d0e1b3a5e3ceddb2a611cb40090dd116a84fc216a07a298d9e000471"}, ++ {file = "tornado-6.5-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:119c03f440a832128820e87add8a175d211b7f36e7ee161c631780877c28f4fb"}, ++ {file = "tornado-6.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:231f2193bb4c28db2bdee9e57bc6ca0cd491f345cd307c57d79613b058e807e0"}, ++ {file = "tornado-6.5-cp39-abi3-win32.whl", hash = "sha256:fd20c816e31be1bbff1f7681f970bbbd0bb241c364220140228ba24242bcdc59"}, ++ {file = "tornado-6.5-cp39-abi3-win_amd64.whl", hash = "sha256:007f036f7b661e899bd9ef3fa5f87eb2cb4d1b2e7d67368e778e140a2f101a7a"}, ++ {file = "tornado-6.5-cp39-abi3-win_arm64.whl", hash = "sha256:542e380658dcec911215c4820654662810c06ad872eefe10def6a5e9b20e9633"}, ++ {file = 
"tornado-6.5.tar.gz", hash = "sha256:c70c0a26d5b2d85440e4debd14a8d0b463a0cf35d92d3af05f5f1ffa8675c826"}, + ] + + [[package]] +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch b/packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch deleted file mode 100644
index 4635d35..0000000 --- a/packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch +++ /dev/null
@@ -1,34 +0,0 @@ -From ea376126a0b7e3fbc0df6ac827eba87d98e479de Mon Sep 17 00:00:00 2001 -From: Shay <hillerys@element.io> -Date: Fri, 2 May 2025 04:14:31 -0700 -Subject: [PATCH 26/74] Fix typo in doc for Scheduled Tasks Admin API (#18384) - ---- - changelog.d/18384.doc | 1 + - docs/admin_api/scheduled_tasks.md | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - create mode 100644 changelog.d/18384.doc - -diff --git a/changelog.d/18384.doc b/changelog.d/18384.doc -new file mode 100644 -index 0000000000..ebcd029639 ---- /dev/null -+++ b/changelog.d/18384.doc -@@ -0,0 +1 @@ -+Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. -diff --git a/docs/admin_api/scheduled_tasks.md b/docs/admin_api/scheduled_tasks.md -index 1708871a6d..b80da5083c 100644 ---- a/docs/admin_api/scheduled_tasks.md -+++ b/docs/admin_api/scheduled_tasks.md -@@ -19,7 +19,7 @@ It returns a JSON body like the following: - "id": "GSA124oegf1", - "action": "shutdown_room", - "status": "complete", -- "timestamp": 23423523, -+ "timestamp_ms": 23423523, - "resource_id": "!roomid", - "result": "some result", - "error": null --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch b/packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch deleted file mode 100644
index 53b5462..0000000 --- a/packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch +++ /dev/null
@@ -1,177 +0,0 @@ -From fd5d3d852df9dbbac13b406144be7ec5a807078d Mon Sep 17 00:00:00 2001 -From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> -Date: Fri, 2 May 2025 12:16:14 +0100 -Subject: [PATCH 27/74] Don't check the `at_hash` (access token hash) in OIDC - ID Tokens if we don't use the access token (#18374) - -Co-authored-by: Eric Eastwood <erice@element.io> ---- - changelog.d/18374.misc | 1 + - synapse/handlers/oidc.py | 29 ++++++++++++++++++++++-- - tests/handlers/test_oidc.py | 44 +++++++++++++++++++++++++++++++++++++ - tests/test_utils/oidc.py | 19 ++++++++++++++-- - 4 files changed, 89 insertions(+), 4 deletions(-) - create mode 100644 changelog.d/18374.misc - -diff --git a/changelog.d/18374.misc b/changelog.d/18374.misc -new file mode 100644 -index 0000000000..a8efca68d0 ---- /dev/null -+++ b/changelog.d/18374.misc -@@ -0,0 +1 @@ -+Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token. -\ No newline at end of file -diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py -index c4cf0636a3..fb759172b3 100644 ---- a/synapse/handlers/oidc.py -+++ b/synapse/handlers/oidc.py -@@ -586,6 +586,24 @@ class OidcProvider: - or self._user_profile_method == "userinfo_endpoint" - ) - -+ @property -+ def _uses_access_token(self) -> bool: -+ """Return True if the `access_token` will be used during the login process. -+ -+ This is useful to determine whether the access token -+ returned by the identity provider, and -+ any related metadata (such as the `at_hash` field in -+ the ID token), should be validated. -+ """ -+ # Currently, Synapse only uses the access_token to fetch user metadata -+ # from the userinfo endpoint. Therefore we only have a single criteria -+ # to check right now but this may change in the future and this function -+ # should be updated if more usages are introduced. -+ # -+ # For example, if we start to use the access_token given to us by the -+ # IdP for more things, such as accessing Resource Server APIs. -+ return self._uses_userinfo -+ - @property - def issuer(self) -> str: - """The issuer identifying this provider.""" -@@ -957,9 +975,16 @@ class OidcProvider: - "nonce": nonce, - "client_id": self._client_auth.client_id, - } -- if "access_token" in token: -+ if self._uses_access_token and "access_token" in token: - # If we got an `access_token`, there should be an `at_hash` claim -- # in the `id_token` that we can check against. -+ # in the `id_token` that we can check against. Setting this -+ # instructs authlib to check the value of `at_hash` in the -+ # ID token. -+ # -+ # We only need to verify the access token if we actually make -+ # use of it. Which currently only happens when we need to fetch -+ # the user's information from the userinfo_endpoint. Thus, this -+ # check is also gated on self._uses_userinfo. 
- claims_params["access_token"] = token["access_token"] - - claims_options = {"iss": {"values": [metadata["issuer"]]}} -diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py -index a7cead83d0..e5f31d57ca 100644 ---- a/tests/handlers/test_oidc.py -+++ b/tests/handlers/test_oidc.py -@@ -1029,6 +1029,50 @@ class OidcHandlerTestCase(HomeserverTestCase): - args = parse_qs(kwargs["data"].decode("utf-8")) - self.assertEqual(args["redirect_uri"], [TEST_REDIRECT_URI]) - -+ @override_config( -+ { -+ "oidc_config": { -+ **DEFAULT_CONFIG, -+ "redirect_uri": TEST_REDIRECT_URI, -+ } -+ } -+ ) -+ def test_code_exchange_ignores_access_token(self) -> None: -+ """ -+ Code exchange completes successfully and doesn't validate the `at_hash` -+ (access token hash) field of an ID token when the access token isn't -+ going to be used. -+ -+ The access token won't be used in this test because Synapse (currently) -+ only needs it to fetch a user's metadata if it isn't included in the ID -+ token itself. -+ -+ Because we have included "openid" in the requested scopes for this IdP -+ (see `SCOPES`), user metadata is be included in the ID token. Thus the -+ access token isn't needed, and it's unnecessary for Synapse to validate -+ the access token. -+ -+ This is a regression test for a situation where an upstream identity -+ provider was providing an invalid `at_hash` value, which Synapse errored -+ on, yet Synapse wasn't using the access token for anything. -+ """ -+ # Exchange the code against the fake IdP. -+ userinfo = { -+ "sub": "foo", -+ "username": "foo", -+ "phone": "1234567", -+ } -+ with self.fake_server.id_token_override( -+ { -+ "at_hash": "invalid-hash", -+ } -+ ): -+ request, _ = self.start_authorization(userinfo) -+ self.get_success(self.handler.handle_oidc_callback(request)) -+ -+ # If no error was rendered, then we have success. -+ self.render_error.assert_not_called() -+ - @override_config( - { - "oidc_config": { -diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py -index 6c4be1c1f8..5bf5e5cb0c 100644 ---- a/tests/test_utils/oidc.py -+++ b/tests/test_utils/oidc.py -@@ -20,7 +20,9 @@ - # - - -+import base64 - import json -+from hashlib import sha256 - from typing import Any, ContextManager, Dict, List, Optional, Tuple - from unittest.mock import Mock, patch - from urllib.parse import parse_qs -@@ -154,10 +156,23 @@ class FakeOidcServer: - json_payload = json.dumps(payload) - return jws.serialize_compact(protected, json_payload, self._key).decode("utf-8") - -- def generate_id_token(self, grant: FakeAuthorizationGrant) -> str: -+ def generate_id_token( -+ self, grant: FakeAuthorizationGrant, access_token: str -+ ) -> str: -+ # Generate a hash of the access token for the optional -+ # `at_hash` field in an ID Token. -+ # -+ # 3.1.3.6. 
ID Token, https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken -+ at_hash = ( -+ base64.urlsafe_b64encode(sha256(access_token.encode("ascii")).digest()[:16]) -+ .rstrip(b"=") -+ .decode("ascii") -+ ) -+ - now = int(self._clock.time()) - id_token = { - **grant.userinfo, -+ "at_hash": at_hash, - "iss": self.issuer, - "aud": grant.client_id, - "iat": now, -@@ -243,7 +258,7 @@ class FakeOidcServer: - } - - if "openid" in grant.scope: -- token["id_token"] = self.generate_id_token(grant) -+ token["id_token"] = self.generate_id_token(grant, access_token) - - return dict(token) - --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0027-Don-t-move-invited-users-to-new-room-when-shutting-d.patch b/packages/overlays/matrix-synapse/patches/0027-Don-t-move-invited-users-to-new-room-when-shutting-d.patch new file mode 100644
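The deleted at_hash patch above hinges on one small formula: `at_hash` is the base64url-encoded left-most half of the hash of the access token (SHA-256 for an RS256-signed ID token), with padding stripped, per OpenID Connect Core §3.1.3.6. A self-contained sketch of the same computation the patch adds to the test OIDC server:

    # Mirrors the at_hash helper added to tests/test_utils/oidc.py above.
    import base64
    from hashlib import sha256

    def compute_at_hash(access_token: str) -> str:
        digest = sha256(access_token.encode("ascii")).digest()
        return base64.urlsafe_b64encode(digest[:16]).rstrip(b"=").decode("ascii")

    print(compute_at_hash("abc123"))  # the value an IdP would place in the ID token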
index 0000000..bbec2e2 --- /dev/null +++ b/packages/overlays/matrix-synapse/patches/0027-Don-t-move-invited-users-to-new-room-when-shutting-d.patch
@@ -0,0 +1,118 @@ +From 24e849e483820762fa2d231ad702e6aeaa23829c Mon Sep 17 00:00:00 2001 +From: Shay <hillerys@element.io> +Date: Fri, 23 May 2025 01:59:40 -0700 +Subject: [PATCH 27/34] Don't move invited users to new room when shutting down + room (#18471) + +This is confusing to users who received unwanted invites. +--- + changelog.d/18471.misc | 1 + + synapse/handlers/room.py | 23 ++++++++++---------- + tests/rest/admin/test_room.py | 41 +++++++++++++++++++++++++++++++++++ + 3 files changed, 54 insertions(+), 11 deletions(-) + create mode 100644 changelog.d/18471.misc + +diff --git a/changelog.d/18471.misc b/changelog.d/18471.misc +new file mode 100644 +index 0000000000..b36712bea3 +--- /dev/null ++++ b/changelog.d/18471.misc +@@ -0,0 +1 @@ ++Don't move invited users to new room when shutting down room. +\ No newline at end of file +diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py +index 386375d64b..763f99e028 100644 +--- a/synapse/handlers/room.py ++++ b/synapse/handlers/room.py +@@ -1806,7 +1806,7 @@ class RoomShutdownHandler: + ] = None, + ) -> Optional[ShutdownRoomResponse]: + """ +- Shuts down a room. Moves all local users and room aliases automatically ++ Shuts down a room. Moves all joined local users and room aliases automatically + to a new room if `new_room_user_id` is set. Otherwise local users only + leave the room without any information. + +@@ -1949,16 +1949,17 @@ class RoomShutdownHandler: + + # Join users to new room + if new_room_user_id: +- assert new_room_id is not None +- await self.room_member_handler.update_membership( +- requester=target_requester, +- target=target_requester.user, +- room_id=new_room_id, +- action=Membership.JOIN, +- content={}, +- ratelimit=False, +- require_consent=False, +- ) ++ if membership == Membership.JOIN: ++ assert new_room_id is not None ++ await self.room_member_handler.update_membership( ++ requester=target_requester, ++ target=target_requester.user, ++ room_id=new_room_id, ++ action=Membership.JOIN, ++ content={}, ++ ratelimit=False, ++ require_consent=False, ++ ) + + result["kicked_users"].append(user_id) + if update_result_fct: +diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py +index 8d806082aa..e22dfcba1b 100644 +--- a/tests/rest/admin/test_room.py ++++ b/tests/rest/admin/test_room.py +@@ -369,6 +369,47 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): + self.assertEqual(200, channel.code, msg=channel.json_body) + self._is_blocked(room_id) + ++ def test_invited_users_not_joined_to_new_room(self) -> None: ++ """ ++ Test that when a new room id is provided, users who are only invited ++ but have not joined original room are not moved to new room. 
++ """ ++ invitee = self.register_user("invitee", "pass") ++ ++ self.helper.invite( ++ self.room_id, self.other_user, invitee, tok=self.other_user_tok ++ ) ++ ++ # verify that user is invited ++ channel = self.make_request( ++ "GET", ++ f"/_matrix/client/v3/rooms/{self.room_id}/members?membership=invite", ++ access_token=self.other_user_tok, ++ ) ++ self.assertEqual(channel.code, 200) ++ self.assertEqual(len(channel.json_body["chunk"]), 1) ++ invite = channel.json_body["chunk"][0] ++ self.assertEqual(invite["state_key"], invitee) ++ ++ # shutdown room ++ channel = self.make_request( ++ "DELETE", ++ self.url, ++ {"new_room_user_id": self.admin_user}, ++ access_token=self.admin_user_tok, ++ ) ++ self.assertEqual(200, channel.code, msg=channel.json_body) ++ self.assertEqual(len(channel.json_body["kicked_users"]), 2) ++ ++ # joined member is moved to new room but invited user is not ++ users_in_room = self.get_success( ++ self.store.get_users_in_room(channel.json_body["new_room_id"]) ++ ) ++ self.assertNotIn(invitee, users_in_room) ++ self.assertIn(self.other_user, users_in_room) ++ self._is_purged(self.room_id) ++ self._has_no_members(self.room_id) ++ + def test_shutdown_room_consent(self) -> None: + """Test that we can shutdown rooms with local users who have not + yet accepted the privacy policy. This used to fail when we tried to +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch b/packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch deleted file mode 100644
index 63ea40e..0000000
--- a/packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch
+++ /dev/null
@@ -1,37 +0,0 @@ -From d18edf67d6f444c8dfa6a46e8769bbfa8d22f57b Mon Sep 17 00:00:00 2001 -From: Quentin Gliech <quenting@element.io> -Date: Fri, 2 May 2025 14:07:23 +0200 -Subject: [PATCH 28/74] Fix lint which broke in #18374 (#18385) - -https://github.com/element-hq/synapse/pull/18374 did not pass linting -but was merged ---- - changelog.d/18385.misc | 1 + - synapse/handlers/oidc.py | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - create mode 100644 changelog.d/18385.misc - -diff --git a/changelog.d/18385.misc b/changelog.d/18385.misc -new file mode 100644 -index 0000000000..a8efca68d0 ---- /dev/null -+++ b/changelog.d/18385.misc -@@ -0,0 +1 @@ -+Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token. -\ No newline at end of file -diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py -index fb759172b3..acf2d4bc8b 100644 ---- a/synapse/handlers/oidc.py -+++ b/synapse/handlers/oidc.py -@@ -599,7 +599,7 @@ class OidcProvider: - # from the userinfo endpoint. Therefore we only have a single criteria - # to check right now but this may change in the future and this function - # should be updated if more usages are introduced. -- # -+ # - # For example, if we start to use the access_token given to us by the - # IdP for more things, such as accessing Resource Server APIs. - return self._uses_userinfo --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0028-fix-device-handler-make-_maybe_retry_device_resync-t.patch b/packages/overlays/matrix-synapse/patches/0028-fix-device-handler-make-_maybe_retry_device_resync-t.patch new file mode 100644
index 0000000..87859bf
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0028-fix-device-handler-make-_maybe_retry_device_resync-t.patch
@@ -0,0 +1,94 @@ +From 33ba8860c43d4770ea119a09a4fcbbf366f3b32e Mon Sep 17 00:00:00 2001 +From: 3nprob <74199244+3nprob@users.noreply.github.com> +Date: Mon, 26 May 2025 14:21:43 +0000 +Subject: [PATCH 28/34] fix(device-handler): make _maybe_retry_device_resync + thread-safe (#18391) + +A race-condition may render concurrent retry loops. + +Use an actual `Lock` for guarding single access of device resyncing +retrying. + +### Pull Request Checklist + +* [x] Pull request is based on the develop branch +* [x] Pull request includes a [changelog +file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). +The entry should: +- Be a short description of your change which makes sense to users. +"Fixed a bug that prevented receiving messages from other servers." +instead of "Moved X method from `EventStore` to `EventWorkerStore`.". + - Use markdown where necessary, mostly for `code blocks`. + - End with either a period (.) or an exclamation mark (!). + - Start with a capital letter. +- Feel free to credit yourself, by adding a sentence "Contributed by +@github_username." or "Contributed by [Your Name]." to the end of the +entry. +* [x] [Code +style](https://element-hq.github.io/synapse/latest/code_style.html) is +correct +(run the +[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) +--- + changelog.d/18391.bugfix | 1 + + synapse/handlers/device.py | 13 +++++-------- + 2 files changed, 6 insertions(+), 8 deletions(-) + create mode 100644 changelog.d/18391.bugfix + +diff --git a/changelog.d/18391.bugfix b/changelog.d/18391.bugfix +new file mode 100644 +index 0000000000..bbcb7b7a28 +--- /dev/null ++++ b/changelog.d/18391.bugfix +@@ -0,0 +1 @@ ++Prevent race-condition in `_maybe_retry_device_resync` entrance. +diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py +index 1efd039f22..f8b547bbed 100644 +--- a/synapse/handlers/device.py ++++ b/synapse/handlers/device.py +@@ -20,6 +20,7 @@ + # + # + import logging ++from threading import Lock + from typing import ( + TYPE_CHECKING, + AbstractSet, +@@ -1237,7 +1238,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): + ) + + # Attempt to resync out of sync device lists every 30s. +- self._resync_retry_in_progress = False ++ self._resync_retry_lock = Lock() + self.clock.looping_call( + run_as_background_process, + 30 * 1000, +@@ -1419,13 +1420,10 @@ class DeviceListUpdater(DeviceListWorkerUpdater): + """Retry to resync device lists that are out of sync, except if another retry is + in progress. + """ +- if self._resync_retry_in_progress: ++ # If the lock can not be acquired we want to always return immediately instead of blocking here ++ if not self._resync_retry_lock.acquire(blocking=False): + return +- + try: +- # Prevent another call of this function to retry resyncing device lists so +- # we don't send too many requests. +- self._resync_retry_in_progress = True + # Get all of the users that need resyncing. + need_resync = await self.store.get_user_ids_requiring_device_list_resync() + +@@ -1466,8 +1464,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): + e, + ) + finally: +- # Allow future calls to retry resyncinc out of sync device lists. 
+- self._resync_retry_in_progress = False ++ self._resync_retry_lock.release() + + async def multi_user_device_resync( + self, user_ids: List[str], mark_failed_as_stale: bool = True +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch b/packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch deleted file mode 100644
index ddc4c02..0000000
--- a/packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch
+++ /dev/null
@@ -1,54 +0,0 @@ -From 411d239db47158cc14f94c94a86a5c713d783821 Mon Sep 17 00:00:00 2001 -From: Shay <hillerys@element.io> -Date: Fri, 2 May 2025 06:04:01 -0700 -Subject: [PATCH 29/74] Apply `should_drop_federated_event` to federation - invites (#18330) - -Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> ---- - changelog.d/18330.misc | 1 + - docs/modules/spam_checker_callbacks.md | 2 ++ - synapse/federation/federation_server.py | 6 ++++++ - 3 files changed, 9 insertions(+) - create mode 100644 changelog.d/18330.misc - -diff --git a/changelog.d/18330.misc b/changelog.d/18330.misc -new file mode 100644 -index 0000000000..dcf341fa34 ---- /dev/null -+++ b/changelog.d/18330.misc -@@ -0,0 +1 @@ -+Apply `should_drop_federated_event` to federation invites. -diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md -index c7f8606fd0..063099a127 100644 ---- a/docs/modules/spam_checker_callbacks.md -+++ b/docs/modules/spam_checker_callbacks.md -@@ -353,6 +353,8 @@ callback returns `False`, Synapse falls through to the next one. The value of th - callback that does not return `False` will be used. If this happens, Synapse will not call - any of the subsequent implementations of this callback. - -+Note that this check is applied to federation invites as of Synapse v1.130.0. -+ - - ### `check_login_for_spam` - -diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py -index f9e97ea13e..2f2c78babc 100644 ---- a/synapse/federation/federation_server.py -+++ b/synapse/federation/federation_server.py -@@ -701,6 +701,12 @@ class FederationServer(FederationBase): - pdu = event_from_pdu_json(content, room_version) - origin_host, _ = parse_server_name(origin) - await self.check_server_matches_acl(origin_host, pdu.room_id) -+ if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu): -+ logger.info( -+ "Federated event contains spam, dropping %s", -+ pdu.event_id, -+ ) -+ raise SynapseError(403, Codes.FORBIDDEN) - try: - pdu = await self._check_sigs_and_hash(room_version, pdu) - except InvalidEventSignatureError as e: --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0072-Hotfix-ignore-rejected-events-in-delayed_events.patch b/packages/overlays/matrix-synapse/patches/0029-Hotfix-ignore-rejected-events-in-delayed_events.patch
index e326cd3..f31ce15 100644
--- a/packages/overlays/matrix-synapse/patches/0072-Hotfix-ignore-rejected-events-in-delayed_events.patch
+++ b/packages/overlays/matrix-synapse/patches/0029-Hotfix-ignore-rejected-events-in-delayed_events.patch
@@ -1,7 +1,7 @@
-From bd2439ec4662f8ad9333797c02f4df764047ace5 Mon Sep 17 00:00:00 2001
+From 0c61ad92eee86fb135de689db54860f6b346a9da Mon Sep 17 00:00:00 2001
 From: Rory& <root@rory.gay>
 Date: Sun, 20 Apr 2025 00:30:29 +0200
-Subject: [PATCH 72/74] Hotfix: ignore rejected events in delayed_events
+Subject: [PATCH 29/34] Hotfix: ignore rejected events in delayed_events
 
 ---
  synapse/handlers/delayed_events.py | 7 ++++++-
diff --git a/packages/overlays/matrix-synapse/patches/0073-Add-too-much-logging-to-room-summary-over-federation.patch b/packages/overlays/matrix-synapse/patches/0030-Add-too-much-logging-to-room-summary-over-federation.patch
index 31caf8a..c6aa256 100644
--- a/packages/overlays/matrix-synapse/patches/0073-Add-too-much-logging-to-room-summary-over-federation.patch
+++ b/packages/overlays/matrix-synapse/patches/0030-Add-too-much-logging-to-room-summary-over-federation.patch
@@ -1,7 +1,7 @@
-From 5f6b610df67bdb57e4de09168923782b934c34fe Mon Sep 17 00:00:00 2001
+From 70c7508b48f0550f59cb2d3a534da524557c166f Mon Sep 17 00:00:00 2001
 From: Rory& <root@rory.gay>
 Date: Wed, 23 Apr 2025 17:53:52 +0200
-Subject: [PATCH 73/74] Add too much logging to room summary over federation
+Subject: [PATCH 30/34] Add too much logging to room summary over federation
 
 Signed-off-by: Rory& <root@rory.gay>
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch b/packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch
deleted file mode 100644
index 1aecab2..0000000
--- a/packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch
+++ /dev/null
@@ -1,699 +0,0 @@ -From b8146d4b03d89a9407125b5934bd7accbe0680e0 Mon Sep 17 00:00:00 2001 -From: Quentin Gliech <quenting@element.io> -Date: Fri, 2 May 2025 15:37:58 +0200 -Subject: [PATCH 30/74] Allow a few admin APIs used by MAS to run on workers - (#18313) - -This should be reviewed commit by commit. - -It adds a few admin servlets that are used by MAS when in delegation -mode to workers - ---------- - -Co-authored-by: Olivier 'reivilibre <oliverw@matrix.org> -Co-authored-by: Devon Hudson <devon.dmytro@gmail.com> -Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> ---- - changelog.d/18313.misc | 1 + - docs/workers.md | 9 + - synapse/app/generic_worker.py | 18 +- - synapse/app/homeserver.py | 9 +- - synapse/handlers/set_password.py | 18 +- - synapse/rest/__init__.py | 1 - - synapse/rest/admin/__init__.py | 29 +- - synapse/rest/admin/devices.py | 26 +- - .../storage/databases/main/end_to_end_keys.py | 78 +++--- - .../storage/databases/main/registration.py | 260 +++++++++--------- - 10 files changed, 249 insertions(+), 200 deletions(-) - create mode 100644 changelog.d/18313.misc - -diff --git a/changelog.d/18313.misc b/changelog.d/18313.misc -new file mode 100644 -index 0000000000..febf3ac06e ---- /dev/null -+++ b/changelog.d/18313.misc -@@ -0,0 +1 @@ -+Allow a few admin APIs used by matrix-authentication-service to run on workers. -diff --git a/docs/workers.md b/docs/workers.md -index 2597e78217..45a00696f3 100644 ---- a/docs/workers.md -+++ b/docs/workers.md -@@ -323,6 +323,15 @@ For multiple workers not handling the SSO endpoints properly, see - [#7530](https://github.com/matrix-org/synapse/issues/7530) and - [#9427](https://github.com/matrix-org/synapse/issues/9427). - -+Additionally, when MSC3861 is enabled (`experimental_features.msc3861.enabled` -+set to `true`), the following endpoints can be handled by the worker: -+ -+ ^/_synapse/admin/v2/users/[^/]+$ -+ ^/_synapse/admin/v1/username_available$ -+ ^/_synapse/admin/v1/users/[^/]+/_allow_cross_signing_replacement_without_uia$ -+ # Only the GET method: -+ ^/_synapse/admin/v1/users/[^/]+/devices$ -+ - Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners) - with `client` and `federation` `resources` must be configured in the - [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) -diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py -index e4120ed424..f495d5b7e4 100644 ---- a/synapse/app/generic_worker.py -+++ b/synapse/app/generic_worker.py -@@ -51,8 +51,7 @@ from synapse.http.server import JsonResource, OptionsResource - from synapse.logging.context import LoggingContext - from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy - from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource --from synapse.rest import ClientRestResource --from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo -+from synapse.rest import ClientRestResource, admin - from synapse.rest.health import HealthResource - from synapse.rest.key.v2 import KeyResource - from synapse.rest.synapse.client import build_synapse_client_resource_tree -@@ -176,8 +175,13 @@ class GenericWorkerServer(HomeServer): - def _listen_http(self, listener_config: ListenerConfig) -> None: - assert listener_config.http_options is not None - -- # We always include a health resource. 
-- resources: Dict[str, Resource] = {"/health": HealthResource()} -+ # We always include an admin resource that we populate with servlets as needed -+ admin_resource = JsonResource(self, canonical_json=False) -+ resources: Dict[str, Resource] = { -+ # We always include a health resource. -+ "/health": HealthResource(), -+ "/_synapse/admin": admin_resource, -+ } - - for res in listener_config.http_options.resources: - for name in res.names: -@@ -190,7 +194,7 @@ class GenericWorkerServer(HomeServer): - - resources.update(build_synapse_client_resource_tree(self)) - resources["/.well-known"] = well_known_resource(self) -- resources["/_synapse/admin"] = AdminRestResource(self) -+ admin.register_servlets(self, admin_resource) - - elif name == "federation": - resources[FEDERATION_PREFIX] = TransportLayerServer(self) -@@ -200,15 +204,13 @@ class GenericWorkerServer(HomeServer): - - # We need to serve the admin servlets for media on the - # worker. -- admin_resource = JsonResource(self, canonical_json=False) -- register_servlets_for_media_repo(self, admin_resource) -+ admin.register_servlets_for_media_repo(self, admin_resource) - - resources.update( - { - MEDIA_R0_PREFIX: media_repo, - MEDIA_V3_PREFIX: media_repo, - LEGACY_MEDIA_PREFIX: media_repo, -- "/_synapse/admin": admin_resource, - } - ) - -diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py -index 2a824e8457..6da2194cf7 100644 ---- a/synapse/app/homeserver.py -+++ b/synapse/app/homeserver.py -@@ -54,6 +54,7 @@ from synapse.config.server import ListenerConfig, TCPListenerConfig - from synapse.federation.transport.server import TransportLayerServer - from synapse.http.additional_resource import AdditionalResource - from synapse.http.server import ( -+ JsonResource, - OptionsResource, - RootOptionsRedirectResource, - StaticResource, -@@ -61,8 +62,7 @@ from synapse.http.server import ( - from synapse.logging.context import LoggingContext - from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy - from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource --from synapse.rest import ClientRestResource --from synapse.rest.admin import AdminRestResource -+from synapse.rest import ClientRestResource, admin - from synapse.rest.health import HealthResource - from synapse.rest.key.v2 import KeyResource - from synapse.rest.synapse.client import build_synapse_client_resource_tree -@@ -180,11 +180,14 @@ class SynapseHomeServer(HomeServer): - if compress: - client_resource = gz_wrap(client_resource) - -+ admin_resource = JsonResource(self, canonical_json=False) -+ admin.register_servlets(self, admin_resource) -+ - resources.update( - { - CLIENT_API_PREFIX: client_resource, - "/.well-known": well_known_resource(self), -- "/_synapse/admin": AdminRestResource(self), -+ "/_synapse/admin": admin_resource, - **build_synapse_client_resource_tree(self), - } - ) -diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py -index 29cc03d71d..94301add9e 100644 ---- a/synapse/handlers/set_password.py -+++ b/synapse/handlers/set_password.py -@@ -36,10 +36,17 @@ class SetPasswordHandler: - def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main - self._auth_handler = hs.get_auth_handler() -- # This can only be instantiated on the main process. -- device_handler = hs.get_device_handler() -- assert isinstance(device_handler, DeviceHandler) -- self._device_handler = device_handler -+ -+ # We don't need the device handler if password changing is disabled. 
-+ # This allows us to instantiate the SetPasswordHandler on the workers -+ # that have admin APIs for MAS -+ if self._auth_handler.can_change_password(): -+ # This can only be instantiated on the main process. -+ device_handler = hs.get_device_handler() -+ assert isinstance(device_handler, DeviceHandler) -+ self._device_handler: Optional[DeviceHandler] = device_handler -+ else: -+ self._device_handler = None - - async def set_password( - self, -@@ -51,6 +58,9 @@ class SetPasswordHandler: - if not self._auth_handler.can_change_password(): - raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN) - -+ # We should have this available only if password changing is enabled. -+ assert self._device_handler is not None -+ - try: - await self.store.user_set_password_hash(user_id, password_hash) - except StoreError as e: -diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py -index 2f1ef84e26..00f108de08 100644 ---- a/synapse/rest/__init__.py -+++ b/synapse/rest/__init__.py -@@ -187,7 +187,6 @@ class ClientRestResource(JsonResource): - mutual_rooms.register_servlets, - login_token_request.register_servlets, - rendezvous.register_servlets, -- auth_metadata.register_servlets, - ]: - continue - -diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py -index cf809d1a27..b1335fed66 100644 ---- a/synapse/rest/admin/__init__.py -+++ b/synapse/rest/admin/__init__.py -@@ -39,7 +39,7 @@ from typing import TYPE_CHECKING, Optional, Tuple - - from synapse.api.errors import Codes, NotFoundError, SynapseError - from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME --from synapse.http.server import HttpServer, JsonResource -+from synapse.http.server import HttpServer - from synapse.http.servlet import RestServlet, parse_json_object_from_request - from synapse.http.site import SynapseRequest - from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin -@@ -51,6 +51,7 @@ from synapse.rest.admin.background_updates import ( - from synapse.rest.admin.devices import ( - DeleteDevicesRestServlet, - DeviceRestServlet, -+ DevicesGetRestServlet, - DevicesRestServlet, - ) - from synapse.rest.admin.event_reports import ( -@@ -264,14 +265,6 @@ class PurgeHistoryStatusRestServlet(RestServlet): - ######################################################################################## - - --class AdminRestResource(JsonResource): -- """The REST resource which gets mounted at /_synapse/admin""" -- -- def __init__(self, hs: "HomeServer"): -- JsonResource.__init__(self, hs, canonical_json=False) -- register_servlets(hs, self) -- -- - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - """ - Register all the admin servlets. -@@ -280,6 +273,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - - # Admin servlets below may not work on workers. - if hs.config.worker.worker_app is not None: -+ # Some admin servlets can be mounted on workers when MSC3861 is enabled. -+ if hs.config.experimental.msc3861.enabled: -+ register_servlets_for_msc3861_delegation(hs, http_server) -+ - return - - register_servlets_for_client_rest_resource(hs, http_server) -@@ -367,4 +364,16 @@ def register_servlets_for_client_rest_resource( - ListMediaInRoom(hs).register(http_server) - - # don't add more things here: new servlets should only be exposed on -- # /_synapse/admin so should not go here. Instead register them in AdminRestResource. -+ # /_synapse/admin so should not go here. Instead register them in register_servlets. 
-+ -+ -+def register_servlets_for_msc3861_delegation( -+ hs: "HomeServer", http_server: HttpServer -+) -> None: -+ """Register servlets needed by MAS when MSC3861 is enabled""" -+ assert hs.config.experimental.msc3861.enabled -+ -+ UserRestServletV2(hs).register(http_server) -+ UsernameAvailableRestServlet(hs).register(http_server) -+ UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server) -+ DevicesGetRestServlet(hs).register(http_server) -diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py -index 449b066923..125ed8c491 100644 ---- a/synapse/rest/admin/devices.py -+++ b/synapse/rest/admin/devices.py -@@ -113,18 +113,19 @@ class DeviceRestServlet(RestServlet): - return HTTPStatus.OK, {} - - --class DevicesRestServlet(RestServlet): -+class DevicesGetRestServlet(RestServlet): - """ - Retrieve the given user's devices -+ -+ This can be mounted on workers as it is read-only, as opposed -+ to `DevicesRestServlet`. - """ - - PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2") - - def __init__(self, hs: "HomeServer"): - self.auth = hs.get_auth() -- handler = hs.get_device_handler() -- assert isinstance(handler, DeviceHandler) -- self.device_handler = handler -+ self.device_worker_handler = hs.get_device_handler() - self.store = hs.get_datastores().main - self.is_mine = hs.is_mine - -@@ -141,9 +142,24 @@ class DevicesRestServlet(RestServlet): - if u is None: - raise NotFoundError("Unknown user") - -- devices = await self.device_handler.get_devices_by_user(target_user.to_string()) -+ devices = await self.device_worker_handler.get_devices_by_user( -+ target_user.to_string() -+ ) - return HTTPStatus.OK, {"devices": devices, "total": len(devices)} - -+ -+class DevicesRestServlet(DevicesGetRestServlet): -+ """ -+ Retrieve the given user's devices -+ """ -+ -+ PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2") -+ -+ def __init__(self, hs: "HomeServer"): -+ super().__init__(hs) -+ assert isinstance(self.device_worker_handler, DeviceHandler) -+ self.device_handler = self.device_worker_handler -+ - async def on_POST( - self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: -diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py -index b4c7069958..341e7014d6 100644 ---- a/synapse/storage/databases/main/end_to_end_keys.py -+++ b/synapse/storage/databases/main/end_to_end_keys.py -@@ -1501,6 +1501,45 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker - "delete_old_otks_for_next_user_batch", impl - ) - -+ async def allow_master_cross_signing_key_replacement_without_uia( -+ self, user_id: str, duration_ms: int -+ ) -> Optional[int]: -+ """Mark this user's latest master key as being replaceable without UIA. -+ -+ Said replacement will only be permitted for a short time after calling this -+ function. That time period is controlled by the duration argument. -+ -+ Returns: -+ None, if there is no such key. -+ Otherwise, the timestamp before which replacement is allowed without UIA. -+ """ -+ timestamp = self._clock.time_msec() + duration_ms -+ -+ def impl(txn: LoggingTransaction) -> Optional[int]: -+ txn.execute( -+ """ -+ UPDATE e2e_cross_signing_keys -+ SET updatable_without_uia_before_ms = ? -+ WHERE stream_id = ( -+ SELECT stream_id -+ FROM e2e_cross_signing_keys -+ WHERE user_id = ? 
AND keytype = 'master' -+ ORDER BY stream_id DESC -+ LIMIT 1 -+ ) -+ """, -+ (timestamp, user_id), -+ ) -+ if txn.rowcount == 0: -+ return None -+ -+ return timestamp -+ -+ return await self.db_pool.runInteraction( -+ "allow_master_cross_signing_key_replacement_without_uia", -+ impl, -+ ) -+ - - class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): - def __init__( -@@ -1755,42 +1794,3 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): - ], - desc="add_e2e_signing_key", - ) -- -- async def allow_master_cross_signing_key_replacement_without_uia( -- self, user_id: str, duration_ms: int -- ) -> Optional[int]: -- """Mark this user's latest master key as being replaceable without UIA. -- -- Said replacement will only be permitted for a short time after calling this -- function. That time period is controlled by the duration argument. -- -- Returns: -- None, if there is no such key. -- Otherwise, the timestamp before which replacement is allowed without UIA. -- """ -- timestamp = self._clock.time_msec() + duration_ms -- -- def impl(txn: LoggingTransaction) -> Optional[int]: -- txn.execute( -- """ -- UPDATE e2e_cross_signing_keys -- SET updatable_without_uia_before_ms = ? -- WHERE stream_id = ( -- SELECT stream_id -- FROM e2e_cross_signing_keys -- WHERE user_id = ? AND keytype = 'master' -- ORDER BY stream_id DESC -- LIMIT 1 -- ) -- """, -- (timestamp, user_id), -- ) -- if txn.rowcount == 0: -- return None -- -- return timestamp -- -- return await self.db_pool.runInteraction( -- "allow_master_cross_signing_key_replacement_without_uia", -- impl, -- ) -diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py -index c43f31353b..1aeae951c5 100644 ---- a/synapse/storage/databases/main/registration.py -+++ b/synapse/storage/databases/main/registration.py -@@ -2105,6 +2105,136 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): - func=is_user_approved_txn, - ) - -+ async def set_user_deactivated_status( -+ self, user_id: str, deactivated: bool -+ ) -> None: -+ """Set the `deactivated` property for the provided user to the provided value. -+ -+ Args: -+ user_id: The ID of the user to set the status for. -+ deactivated: The value to set for `deactivated`. -+ """ -+ -+ await self.db_pool.runInteraction( -+ "set_user_deactivated_status", -+ self.set_user_deactivated_status_txn, -+ user_id, -+ deactivated, -+ ) -+ -+ def set_user_deactivated_status_txn( -+ self, txn: LoggingTransaction, user_id: str, deactivated: bool -+ ) -> None: -+ self.db_pool.simple_update_one_txn( -+ txn=txn, -+ table="users", -+ keyvalues={"name": user_id}, -+ updatevalues={"deactivated": 1 if deactivated else 0}, -+ ) -+ self._invalidate_cache_and_stream( -+ txn, self.get_user_deactivated_status, (user_id,) -+ ) -+ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -+ self._invalidate_cache_and_stream(txn, self.is_guest, (user_id,)) -+ -+ async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None: -+ """ -+ Set whether the user's account is suspended in the `users` table. 
-+ -+ Args: -+ user_id: The user ID of the user in question -+ suspended: True if the user is suspended, false if not -+ """ -+ await self.db_pool.runInteraction( -+ "set_user_suspended_status", -+ self.set_user_suspended_status_txn, -+ user_id, -+ suspended, -+ ) -+ -+ def set_user_suspended_status_txn( -+ self, txn: LoggingTransaction, user_id: str, suspended: bool -+ ) -> None: -+ self.db_pool.simple_update_one_txn( -+ txn=txn, -+ table="users", -+ keyvalues={"name": user_id}, -+ updatevalues={"suspended": suspended}, -+ ) -+ self._invalidate_cache_and_stream( -+ txn, self.get_user_suspended_status, (user_id,) -+ ) -+ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -+ -+ async def set_user_locked_status(self, user_id: str, locked: bool) -> None: -+ """Set the `locked` property for the provided user to the provided value. -+ -+ Args: -+ user_id: The ID of the user to set the status for. -+ locked: The value to set for `locked`. -+ """ -+ -+ await self.db_pool.runInteraction( -+ "set_user_locked_status", -+ self.set_user_locked_status_txn, -+ user_id, -+ locked, -+ ) -+ -+ def set_user_locked_status_txn( -+ self, txn: LoggingTransaction, user_id: str, locked: bool -+ ) -> None: -+ self.db_pool.simple_update_one_txn( -+ txn=txn, -+ table="users", -+ keyvalues={"name": user_id}, -+ updatevalues={"locked": locked}, -+ ) -+ self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,)) -+ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -+ -+ async def update_user_approval_status( -+ self, user_id: UserID, approved: bool -+ ) -> None: -+ """Set the user's 'approved' flag to the given value. -+ -+ The boolean will be turned into an int (in update_user_approval_status_txn) -+ because the column is a smallint. -+ -+ Args: -+ user_id: the user to update the flag for. -+ approved: the value to set the flag to. -+ """ -+ await self.db_pool.runInteraction( -+ "update_user_approval_status", -+ self.update_user_approval_status_txn, -+ user_id.to_string(), -+ approved, -+ ) -+ -+ def update_user_approval_status_txn( -+ self, txn: LoggingTransaction, user_id: str, approved: bool -+ ) -> None: -+ """Set the user's 'approved' flag to the given value. -+ -+ The boolean is turned into an int because the column is a smallint. -+ -+ Args: -+ txn: the current database transaction. -+ user_id: the user to update the flag for. -+ approved: the value to set the flag to. -+ """ -+ self.db_pool.simple_update_one_txn( -+ txn=txn, -+ table="users", -+ keyvalues={"name": user_id}, -+ updatevalues={"approved": approved}, -+ ) -+ -+ # Invalidate the caches of methods that read the value of the 'approved' flag. -+ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -+ self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,)) -+ - - class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): - def __init__( -@@ -2217,117 +2347,6 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): - - return nb_processed - -- async def set_user_deactivated_status( -- self, user_id: str, deactivated: bool -- ) -> None: -- """Set the `deactivated` property for the provided user to the provided value. -- -- Args: -- user_id: The ID of the user to set the status for. -- deactivated: The value to set for `deactivated`. 
-- """ -- -- await self.db_pool.runInteraction( -- "set_user_deactivated_status", -- self.set_user_deactivated_status_txn, -- user_id, -- deactivated, -- ) -- -- def set_user_deactivated_status_txn( -- self, txn: LoggingTransaction, user_id: str, deactivated: bool -- ) -> None: -- self.db_pool.simple_update_one_txn( -- txn=txn, -- table="users", -- keyvalues={"name": user_id}, -- updatevalues={"deactivated": 1 if deactivated else 0}, -- ) -- self._invalidate_cache_and_stream( -- txn, self.get_user_deactivated_status, (user_id,) -- ) -- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -- txn.call_after(self.is_guest.invalidate, (user_id,)) -- -- async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None: -- """ -- Set whether the user's account is suspended in the `users` table. -- -- Args: -- user_id: The user ID of the user in question -- suspended: True if the user is suspended, false if not -- """ -- await self.db_pool.runInteraction( -- "set_user_suspended_status", -- self.set_user_suspended_status_txn, -- user_id, -- suspended, -- ) -- -- def set_user_suspended_status_txn( -- self, txn: LoggingTransaction, user_id: str, suspended: bool -- ) -> None: -- self.db_pool.simple_update_one_txn( -- txn=txn, -- table="users", -- keyvalues={"name": user_id}, -- updatevalues={"suspended": suspended}, -- ) -- self._invalidate_cache_and_stream( -- txn, self.get_user_suspended_status, (user_id,) -- ) -- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -- -- async def set_user_locked_status(self, user_id: str, locked: bool) -> None: -- """Set the `locked` property for the provided user to the provided value. -- -- Args: -- user_id: The ID of the user to set the status for. -- locked: The value to set for `locked`. -- """ -- -- await self.db_pool.runInteraction( -- "set_user_locked_status", -- self.set_user_locked_status_txn, -- user_id, -- locked, -- ) -- -- def set_user_locked_status_txn( -- self, txn: LoggingTransaction, user_id: str, locked: bool -- ) -> None: -- self.db_pool.simple_update_one_txn( -- txn=txn, -- table="users", -- keyvalues={"name": user_id}, -- updatevalues={"locked": locked}, -- ) -- self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,)) -- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -- -- def update_user_approval_status_txn( -- self, txn: LoggingTransaction, user_id: str, approved: bool -- ) -> None: -- """Set the user's 'approved' flag to the given value. -- -- The boolean is turned into an int because the column is a smallint. -- -- Args: -- txn: the current database transaction. -- user_id: the user to update the flag for. -- approved: the value to set the flag to. -- """ -- self.db_pool.simple_update_one_txn( -- txn=txn, -- table="users", -- keyvalues={"name": user_id}, -- updatevalues={"approved": approved}, -- ) -- -- # Invalidate the caches of methods that read the value of the 'approved' flag. -- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) -- self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,)) -- - - class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): - def __init__( -@@ -2956,25 +2975,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): - start_or_continue_validation_session_txn, - ) - -- async def update_user_approval_status( -- self, user_id: UserID, approved: bool -- ) -> None: -- """Set the user's 'approved' flag to the given value. 
-- -- The boolean will be turned into an int (in update_user_approval_status_txn) -- because the column is a smallint. -- -- Args: -- user_id: the user to update the flag for. -- approved: the value to set the flag to. -- """ -- await self.db_pool.runInteraction( -- "update_user_approval_status", -- self.update_user_approval_status_txn, -- user_id.to_string(), -- approved, -- ) -- - @wrap_as_background_process("delete_expired_login_tokens") - async def _delete_expired_login_tokens(self) -> None: - """Remove login tokens with expiry dates that have passed.""" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch b/packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch deleted file mode 100644
index b751f04..0000000
--- a/packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch
+++ /dev/null
@@ -1,247 +0,0 @@ -From fe8bb620de8e5830328c6d23127657560f449af0 Mon Sep 17 00:00:00 2001 -From: Will Lewis <1543626+wrjlewis@users.noreply.github.com> -Date: Fri, 2 May 2025 15:38:02 +0100 -Subject: [PATCH 31/74] Add the ability to exclude remote users in user - directory search results (#18300) - -This change adds a new configuration -`user_directory.exclude_remote_users`, which defaults to False. -When set to True, remote users will not appear in user directory search -results. - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [x] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [x] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) - ---------- - -Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> ---- - changelog.d/18300.feature | 1 + - .../configuration/config_documentation.md | 2 + - synapse/config/user_directory.py | 3 + - synapse/handlers/user_directory.py | 3 + - .../storage/databases/main/user_directory.py | 18 ++++-- - tests/handlers/test_user_directory.py | 61 +++++++++++++++++++ - 6 files changed, 84 insertions(+), 4 deletions(-) - create mode 100644 changelog.d/18300.feature - -diff --git a/changelog.d/18300.feature b/changelog.d/18300.feature -new file mode 100644 -index 0000000000..92bea77556 ---- /dev/null -+++ b/changelog.d/18300.feature -@@ -0,0 +1 @@ -+Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results. -\ No newline at end of file -diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md -index 19dc9dd356..5351bef83a 100644 ---- a/docs/usage/configuration/config_documentation.md -+++ b/docs/usage/configuration/config_documentation.md -@@ -4095,6 +4095,7 @@ This option has the following sub-options: - * `prefer_local_users`: Defines whether to prefer local users in search query results. - If set to true, local users are more likely to appear above remote users when searching the - user directory. Defaults to false. -+* `exclude_remote_users`: If set to true, the search will only return local users. Defaults to false. - * `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false. 
- - Example configuration: -@@ -4103,6 +4104,7 @@ user_directory: - enabled: false - search_all_users: true - prefer_local_users: true -+ exclude_remote_users: false - show_locked_users: true - ``` - --- -diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py -index c67796906f..fe4e2dc65c 100644 ---- a/synapse/config/user_directory.py -+++ b/synapse/config/user_directory.py -@@ -38,6 +38,9 @@ class UserDirectoryConfig(Config): - self.user_directory_search_all_users = user_directory_config.get( - "search_all_users", False - ) -+ self.user_directory_exclude_remote_users = user_directory_config.get( -+ "exclude_remote_users", False -+ ) - self.user_directory_search_prefer_local_users = user_directory_config.get( - "prefer_local_users", False - ) -diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py -index f88d39b38f..33edef5f14 100644 ---- a/synapse/handlers/user_directory.py -+++ b/synapse/handlers/user_directory.py -@@ -108,6 +108,9 @@ class UserDirectoryHandler(StateDeltasHandler): - self.is_mine_id = hs.is_mine_id - self.update_user_directory = hs.config.worker.should_update_user_directory - self.search_all_users = hs.config.userdirectory.user_directory_search_all_users -+ self.exclude_remote_users = ( -+ hs.config.userdirectory.user_directory_exclude_remote_users -+ ) - self.show_locked_users = hs.config.userdirectory.show_locked_users - self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker - self._hs = hs -diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py -index d6cd0774a8..391f0dd638 100644 ---- a/synapse/storage/databases/main/user_directory.py -+++ b/synapse/storage/databases/main/user_directory.py -@@ -1037,11 +1037,11 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - } - """ - -+ join_args: Tuple[str, ...] = (user_id,) -+ - if self.hs.config.userdirectory.user_directory_search_all_users: -- join_args = (user_id,) - where_clause = "user_id != ?" - else: -- join_args = (user_id,) - where_clause = """ - ( - EXISTS (select 1 from users_in_public_rooms WHERE user_id = t.user_id) -@@ -1055,6 +1055,14 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - if not show_locked_users: - where_clause += " AND (u.locked IS NULL OR u.locked = FALSE)" - -+ # Adjust the JOIN type based on the exclude_remote_users flag (the users -+ # table only contains local users so an inner join is a good way to -+ # to exclude remote users) -+ if self.hs.config.userdirectory.user_directory_exclude_remote_users: -+ join_type = "JOIN" -+ else: -+ join_type = "LEFT JOIN" -+ - # We allow manipulating the ranking algorithm by injecting statements - # based on config options. 
- additional_ordering_statements = [] -@@ -1086,7 +1094,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - SELECT d.user_id AS user_id, display_name, avatar_url - FROM matching_users as t - INNER JOIN user_directory AS d USING (user_id) -- LEFT JOIN users AS u ON t.user_id = u.name -+ %(join_type)s users AS u ON t.user_id = u.name - WHERE - %(where_clause)s - ORDER BY -@@ -1115,6 +1123,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - """ % { - "where_clause": where_clause, - "order_case_statements": " ".join(additional_ordering_statements), -+ "join_type": join_type, - } - args = ( - (full_query,) -@@ -1142,7 +1151,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - SELECT d.user_id AS user_id, display_name, avatar_url - FROM user_directory_search as t - INNER JOIN user_directory AS d USING (user_id) -- LEFT JOIN users AS u ON t.user_id = u.name -+ %(join_type)s users AS u ON t.user_id = u.name - WHERE - %(where_clause)s - AND value MATCH ? -@@ -1155,6 +1164,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - """ % { - "where_clause": where_clause, - "order_statements": " ".join(additional_ordering_statements), -+ "join_type": join_type, - } - args = join_args + (search_query,) + ordering_arguments + (limit + 1,) - else: -diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py -index a75095a79f..a9e9d7d7ea 100644 ---- a/tests/handlers/test_user_directory.py -+++ b/tests/handlers/test_user_directory.py -@@ -992,6 +992,67 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): - [self.assertIn(user, local_users) for user in received_user_id_ordering[:3]] - [self.assertIn(user, remote_users) for user in received_user_id_ordering[3:]] - -+ @override_config( -+ { -+ "user_directory": { -+ "enabled": True, -+ "search_all_users": True, -+ "exclude_remote_users": True, -+ } -+ } -+ ) -+ def test_exclude_remote_users(self) -> None: -+ """Tests that only local users are returned when -+ user_directory.exclude_remote_users is True. 
-+ """ -+ -+ # Create a room and few users to test the directory with -+ searching_user = self.register_user("searcher", "password") -+ searching_user_tok = self.login("searcher", "password") -+ -+ room_id = self.helper.create_room_as( -+ searching_user, -+ room_version=RoomVersions.V1.identifier, -+ tok=searching_user_tok, -+ ) -+ -+ # Create a few local users and join them to the room -+ local_user_1 = self.register_user("user_xxxxx", "password") -+ local_user_2 = self.register_user("user_bbbbb", "password") -+ local_user_3 = self.register_user("user_zzzzz", "password") -+ -+ self._add_user_to_room(room_id, RoomVersions.V1, local_user_1) -+ self._add_user_to_room(room_id, RoomVersions.V1, local_user_2) -+ self._add_user_to_room(room_id, RoomVersions.V1, local_user_3) -+ -+ # Create a few "remote" users and join them to the room -+ remote_user_1 = "@user_aaaaa:remote_server" -+ remote_user_2 = "@user_yyyyy:remote_server" -+ remote_user_3 = "@user_ccccc:remote_server" -+ self._add_user_to_room(room_id, RoomVersions.V1, remote_user_1) -+ self._add_user_to_room(room_id, RoomVersions.V1, remote_user_2) -+ self._add_user_to_room(room_id, RoomVersions.V1, remote_user_3) -+ -+ local_users = [local_user_1, local_user_2, local_user_3] -+ remote_users = [remote_user_1, remote_user_2, remote_user_3] -+ -+ # The local searching user searches for the term "user", which other users have -+ # in their user id -+ results = self.get_success( -+ self.handler.search_users(searching_user, "user", 20) -+ )["results"] -+ received_user_ids = [result["user_id"] for result in results] -+ -+ for user in local_users: -+ self.assertIn( -+ user, received_user_ids, f"Local user {user} not found in results" -+ ) -+ -+ for user in remote_users: -+ self.assertNotIn( -+ user, received_user_ids, f"Remote user {user} should not be in results" -+ ) -+ - def _add_user_to_room( - self, - room_id: str, --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0074-Log-entire-room-if-accessibility-check-fails.patch b/packages/overlays/matrix-synapse/patches/0031-Log-entire-room-if-accessibility-check-fails.patch
index 12a33e6..a3dbcad 100644
--- a/packages/overlays/matrix-synapse/patches/0074-Log-entire-room-if-accessibility-check-fails.patch
+++ b/packages/overlays/matrix-synapse/patches/0031-Log-entire-room-if-accessibility-check-fails.patch
@@ -1,7 +1,7 @@
-From 923a3c7204aea235744d3081a1d3cc99b757f801 Mon Sep 17 00:00:00 2001
+From 5951a67a191cb54a9b9b801ca5faf994ec106143 Mon Sep 17 00:00:00 2001
 From: Rory& <root@rory.gay>
 Date: Wed, 23 Apr 2025 18:24:57 +0200
-Subject: [PATCH 74/74] Log entire room if accessibility check fails
+Subject: [PATCH 31/34] Log entire room if accessibility check fails
 
 Signed-off-by: Rory& <root@rory.gay>
 ---
diff --git a/packages/overlays/matrix-synapse/patches/0032-Log-policy-server-rejected-events.patch b/packages/overlays/matrix-synapse/patches/0032-Log-policy-server-rejected-events.patch
new file mode 100644
index 0000000..66c0250
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0032-Log-policy-server-rejected-events.patch
@@ -0,0 +1,30 @@ +From abdbd4952722a7bf816e12d0a70192f88ec6041e Mon Sep 17 00:00:00 2001 +From: Rory& <root@rory.gay> +Date: Tue, 27 May 2025 05:21:46 +0200 +Subject: [PATCH 32/34] Log policy server rejected events + +--- + synapse/handlers/room_policy.py | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/synapse/handlers/room_policy.py b/synapse/handlers/room_policy.py +index dcfebb128c..3a83c4d6ec 100644 +--- a/synapse/handlers/room_policy.py ++++ b/synapse/handlers/room_policy.py +@@ -84,6 +84,13 @@ class RoomPolicyHandler: + policy_server, event + ) + if recommendation != RECOMMENDATION_OK: ++ logger.info( ++ "[POLICY] Policy server %s recommended not to allow event %s in room %s: %s", ++ policy_server, ++ event.event_id, ++ event.room_id, ++ recommendation, ++ ) + return False + + return True # default allow +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch b/packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch deleted file mode 100644
index 22df408..0000000
--- a/packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch
+++ /dev/null
@@ -1,118 +0,0 @@ -From 9f9eb563339079ee5ce082fcd63d0ab5d849b7ed Mon Sep 17 00:00:00 2001 -From: David Baker <dbkr@users.noreply.github.com> -Date: Mon, 5 May 2025 10:08:50 +0100 -Subject: [PATCH 32/74] Return specific error code when email / phone not - supported (#17578) - -Implements https://github.com/matrix-org/matrix-spec-proposals/pull/4178 - -If this would need tests, could you give some idea of what tests would -be needed and how best to add them? - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [ ] Pull request is based on the develop branch -* [ ] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [ ] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) ---- - changelog.d/17578.misc | 1 + - synapse/api/errors.py | 1 + - synapse/rest/client/account.py | 6 +++++- - synapse/util/msisdn.py | 4 ++-- - 4 files changed, 9 insertions(+), 3 deletions(-) - create mode 100644 changelog.d/17578.misc - -diff --git a/changelog.d/17578.misc b/changelog.d/17578.misc -new file mode 100644 -index 0000000000..7bf69576cd ---- /dev/null -+++ b/changelog.d/17578.misc -@@ -0,0 +1 @@ -+Return specific error code when adding an email address / phone number to account is not supported (MSC4178). 
-diff --git a/synapse/api/errors.py b/synapse/api/errors.py -index 5dd6e84289..edd2073384 100644 ---- a/synapse/api/errors.py -+++ b/synapse/api/errors.py -@@ -70,6 +70,7 @@ class Codes(str, Enum): - THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND" - THREEPID_DENIED = "M_THREEPID_DENIED" - INVALID_USERNAME = "M_INVALID_USERNAME" -+ THREEPID_MEDIUM_NOT_SUPPORTED = "M_THREEPID_MEDIUM_NOT_SUPPORTED" - SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED" - CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN" - CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM" -diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py -index 59dbad3582..7d6c0afd9a 100644 ---- a/synapse/rest/client/account.py -+++ b/synapse/rest/client/account.py -@@ -350,6 +350,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): - raise SynapseError( - 400, - "Adding an email to your account is disabled on this server", -+ Codes.THREEPID_MEDIUM_NOT_SUPPORTED, - ) - - body = parse_and_validate_json_object_from_request( -@@ -456,6 +457,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): - raise SynapseError( - 400, - "Adding phone numbers to user account is not supported by this homeserver", -+ Codes.THREEPID_MEDIUM_NOT_SUPPORTED, - ) - - ret = await self.identity_handler.requestMsisdnToken( -@@ -498,7 +500,9 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): - "Adding emails have been disabled due to lack of an email config" - ) - raise SynapseError( -- 400, "Adding an email to your account is disabled on this server" -+ 400, -+ "Adding an email to your account is disabled on this server", -+ Codes.THREEPID_MEDIUM_NOT_SUPPORTED, - ) - - sid = parse_string(request, "sid", required=True) -diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py -index b6a784f0bc..dce8da5e18 100644 ---- a/synapse/util/msisdn.py -+++ b/synapse/util/msisdn.py -@@ -21,7 +21,7 @@ - - import phonenumbers - --from synapse.api.errors import SynapseError -+from synapse.api.errors import Codes, SynapseError - - - def phone_number_to_msisdn(country: str, number: str) -> str: -@@ -45,7 +45,7 @@ def phone_number_to_msisdn(country: str, number: str) -> str: - try: - phoneNumber = phonenumbers.parse(number, country) - except phonenumbers.NumberParseException: -- raise SynapseError(400, "Unable to parse phone number") -+ raise SynapseError(400, "Unable to parse phone number", Codes.INVALID_PARAM) - return phonenumbers.format_number(phoneNumber, phonenumbers.PhoneNumberFormat.E164)[ - 1: - ] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0033-Use-parse_boolean-for-unredacted-content.patch b/packages/overlays/matrix-synapse/patches/0033-Use-parse_boolean-for-unredacted-content.patch new file mode 100644
index 0000000..93567d4
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0033-Use-parse_boolean-for-unredacted-content.patch
@@ -0,0 +1,29 @@ +From 22d37c310c09dcc65f118cc3d39bda0a65507759 Mon Sep 17 00:00:00 2001 +From: Rory& <root@rory.gay> +Date: Tue, 27 May 2025 06:14:26 +0200 +Subject: [PATCH 33/34] Use parse_boolean for unredacted content + +--- + synapse/rest/client/room.py | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py +index 03e7bc0a24..725b2162fd 100644 +--- a/synapse/rest/client/room.py ++++ b/synapse/rest/client/room.py +@@ -894,10 +894,9 @@ class RoomEventServlet(RestServlet): + requester = await self.auth.get_user_by_req(request, allow_guest=True) + + include_unredacted_content = self.msc2815_enabled and ( +- parse_string( ++ parse_boolean( + request, +- "fi.mau.msc2815.include_unredacted_content", +- allowed_values=("true", "false"), ++ "fi.mau.msc2815.include_unredacted_content" + ) + == "true" + ) +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch b/packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch deleted file mode 100644
index 62f579f..0000000
--- a/packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch
+++ /dev/null
@@ -1,87 +0,0 @@ -From d0873d549a8cce720a7842919126d78b4d9d030d Mon Sep 17 00:00:00 2001 -From: Will Hunt <will@half-shot.uk> -Date: Tue, 6 May 2025 11:04:31 +0100 -Subject: [PATCH 34/74] Ensure the url previewer also hashes and quarantines - media (#18297) - -Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> ---- - changelog.d/18297.misc | 1 + - synapse/media/media_repository.py | 1 - - synapse/media/url_previewer.py | 17 ++++++++++++++--- - 3 files changed, 15 insertions(+), 4 deletions(-) - create mode 100644 changelog.d/18297.misc - -diff --git a/changelog.d/18297.misc b/changelog.d/18297.misc -new file mode 100644 -index 0000000000..5032d48174 ---- /dev/null -+++ b/changelog.d/18297.misc -@@ -0,0 +1 @@ -+Apply file hashing and existing quarantines to media downloaded for URL previews. -diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py -index 859b30e029..18c5a8ecec 100644 ---- a/synapse/media/media_repository.py -+++ b/synapse/media/media_repository.py -@@ -378,7 +378,6 @@ class MediaRepository: - media_length=content_length, - user_id=auth_user, - sha256=sha256, -- # TODO: Better name? - quarantined_by="system" if should_quarantine else None, - ) - -diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py -index 2e65a04789..8ef2b3f0c0 100644 ---- a/synapse/media/url_previewer.py -+++ b/synapse/media/url_previewer.py -@@ -41,7 +41,7 @@ from synapse.api.errors import Codes, SynapseError - from synapse.http.client import SimpleHttpClient - from synapse.logging.context import make_deferred_yieldable, run_in_background - from synapse.media._base import FileInfo, get_filename_from_headers --from synapse.media.media_storage import MediaStorage -+from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter - from synapse.media.oembed import OEmbedProvider - from synapse.media.preview_html import decode_body, parse_html_to_open_graph - from synapse.metrics.background_process_metrics import run_as_background_process -@@ -593,17 +593,26 @@ class UrlPreviewer: - file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True) - - async with self.media_storage.store_into_file(file_info) as (f, fname): -+ sha256writer = SHA256TransparentIOWriter(f) - if url.startswith("data:"): - if not allow_data_urls: - raise SynapseError( - 500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN - ) - -- download_result = await self._parse_data_url(url, f) -+ download_result = await self._parse_data_url(url, sha256writer.wrap()) - else: -- download_result = await self._download_url(url, f) -+ download_result = await self._download_url(url, sha256writer.wrap()) - - try: -+ sha256 = sha256writer.hexdigest() -+ should_quarantine = await self.store.get_is_hash_quarantined(sha256) -+ -+ if should_quarantine: -+ logger.warn( -+ "Media has been automatically quarantined as it matched existing quarantined media" -+ ) -+ - time_now_ms = self.clock.time_msec() - - await self.store.store_local_media( -@@ -614,6 +623,8 @@ class UrlPreviewer: - media_length=download_result.length, - user_id=user, - url_cache=url, -+ sha256=sha256, -+ quarantined_by="system" if should_quarantine else None, - ) - - except Exception as e: --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0034-Expose-tombstone-in-room-admin-api.patch b/packages/overlays/matrix-synapse/patches/0034-Expose-tombstone-in-room-admin-api.patch new file mode 100644
index 0000000..06a5789
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0034-Expose-tombstone-in-room-admin-api.patch
@@ -0,0 +1,139 @@ +From 8da5632efc85ad4043fd81e49b4a68fd8bab226e Mon Sep 17 00:00:00 2001 +From: Rory& <root@rory.gay> +Date: Tue, 27 May 2025 06:37:52 +0200 +Subject: [PATCH 34/34] Expose tombstone in room admin api + +--- + synapse/rest/admin/rooms.py | 1 + + synapse/rest/client/room.py | 1 - + synapse/storage/databases/main/room.py | 68 +++++++++++++++++++------- + 3 files changed, 50 insertions(+), 20 deletions(-) + +diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py +index f8c5bf18d4..60a28abd18 100644 +--- a/synapse/rest/admin/rooms.py ++++ b/synapse/rest/admin/rooms.py +@@ -260,6 +260,7 @@ class ListRoomRestServlet(RestServlet): + search_term, + public_rooms, + empty_rooms, ++ emma_include_tombstone = True + ) + + response = { +diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py +index 725b2162fd..8408c687cc 100644 +--- a/synapse/rest/client/room.py ++++ b/synapse/rest/client/room.py +@@ -898,7 +898,6 @@ class RoomEventServlet(RestServlet): + request, + "fi.mau.msc2815.include_unredacted_content" + ) +- == "true" + ) + if include_unredacted_content and not await self.auth.is_server_admin( + requester +diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py +index 56217fccdf..5f4d024fce 100644 +--- a/synapse/storage/databases/main/room.py ++++ b/synapse/storage/databases/main/room.py +@@ -608,6 +608,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): + search_term: Optional[str], + public_rooms: Optional[bool], + empty_rooms: Optional[bool], ++ emma_include_tombstone: bool = False, + ) -> Tuple[List[Dict[str, Any]], int]: + """Function to retrieve a paginated list of rooms as json. + +@@ -627,6 +628,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): + If true, empty rooms are queried. + if false, empty rooms are excluded from the query. When it is + none (the default), both empty rooms and none-empty rooms are queried. ++ emma_include_tombstone: If true, include tombstone events in the results. + Returns: + A list of room dicts and an integer representing the total number of + rooms that exist given this query +@@ -755,6 +757,17 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): + where=where_clause, + ) + ++ # Emma: we're assuming this is the same db... ++ get_current_state_event_id_sql = """ ++ SELECT event_id FROM current_state_events ++ WHERE room_id = ? AND type = ? AND state_key = ? ++ """ ++ ++ get_event_json_sql = """ ++ SELECT json FROM event_json ++ WHERE event_id = ? ++ """ ++ + def _get_rooms_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], int]: +@@ -765,26 +778,43 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): + # Refactor room query data into a structured dictionary + rooms = [] + for room in txn: ++ roomData = { ++ "room_id": room[0], ++ "name": room[1], ++ "canonical_alias": room[2], ++ "joined_members": room[3], ++ "joined_local_members": room[4], ++ "version": room[5], ++ "creator": room[6], ++ "encryption": room[7], ++ # room_stats_state.federatable is an integer on sqlite. ++ "federatable": bool(room[8]), ++ # rooms.is_public is an integer on sqlite. 
++ "public": bool(room[9]), ++ "join_rules": room[10], ++ "guest_access": room[11], ++ "history_visibility": room[12], ++ "state_events": room[13], ++ "room_type": room[14], ++ } ++ ++ if emma_include_tombstone: ++ tombstone_id = self.db_pool.execute("get_tombstone_event_id", get_current_state_event_id_sql, (room[0], EventTypes.Tombstone, "")).fetchone() ++ #if tombstone_id: ++ # tombstone_event_id = tombstone_id[0] ++ # # Get the tombstone event ++ # event_json = self.db_pool.execute( ++ # "get_tombstone_event_json", ++ # get_event_json_sql, ++ # (tombstone_event_id) ++ # ).fetchone() ++# ++ # roomData["gay.rory.synapse_extensions.tombstone"] = event_json ++ #else: ++ # roomData["gay.rory.synapse_extensions.tombstone"] = None ++ + rooms.append( +- { +- "room_id": room[0], +- "name": room[1], +- "canonical_alias": room[2], +- "joined_members": room[3], +- "joined_local_members": room[4], +- "version": room[5], +- "creator": room[6], +- "encryption": room[7], +- # room_stats_state.federatable is an integer on sqlite. +- "federatable": bool(room[8]), +- # rooms.is_public is an integer on sqlite. +- "public": bool(room[9]), +- "join_rules": room[10], +- "guest_access": room[11], +- "history_visibility": room[12], +- "state_events": room[13], +- "room_type": room[14], +- } ++ roomData + ) + + # Execute the count query +-- +2.49.0 + diff --git a/packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch b/packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch deleted file mode 100644
index d34a1be..0000000
--- a/packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch
+++ /dev/null
@@ -1,2816 +0,0 @@ -From ae877aa101796a0cd57c3637a875140ddb25ed51 Mon Sep 17 00:00:00 2001 -From: Devon Hudson <devon.dmytro@gmail.com> -Date: Wed, 7 May 2025 15:07:58 +0000 -Subject: [PATCH 35/74] Convert Sliding Sync tests to use higher-level - `compute_interested_rooms` (#18399) - -Spawning from -https://github.com/element-hq/synapse/pull/18375#discussion_r2071768635, - -This updates some sliding sync tests to use a higher level function in -order to move test coverage to cover both fallback & new tables. -Important when https://github.com/element-hq/synapse/pull/18375 is -merged. - -In other words, adjust tests to target `compute_interested_room(...)` -(relevant to both new and fallback path) instead of the lower level -`get_room_membership_for_user_at_to_token(...)` that only applies to the -fallback path. - -### Dev notes - -``` -SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.handlers.test_sliding_sync.ComputeInterestedRoomsTestCase_new -``` - -``` -SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.rest.client.sliding_sync -``` - -``` -SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.handlers.test_sliding_sync.ComputeInterestedRoomsTestCase_new.test_display_name_changes_leave_after_token_range -``` - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [x] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [x] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) - ---------- - -Co-authored-by: Eric Eastwood <erice@element.io> ---- - changelog.d/18399.misc | 1 + - synapse/handlers/sliding_sync/room_lists.py | 122 +- - synapse/storage/_base.py | 10 +- - synapse/storage/databases/main/cache.py | 23 +- - synapse/storage/databases/main/roommember.py | 135 +- - synapse/storage/databases/main/stream.py | 2 + - tests/handlers/test_sliding_sync.py | 1382 +++++++++++++----- - 7 files changed, 1238 insertions(+), 437 deletions(-) - create mode 100644 changelog.d/18399.misc - -diff --git a/changelog.d/18399.misc b/changelog.d/18399.misc -new file mode 100644 -index 0000000000..847dc9a2b1 ---- /dev/null -+++ b/changelog.d/18399.misc -@@ -0,0 +1 @@ -+Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both new and fallback logic paths. 
-diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py -index a1730b7e05..7e3cf539df 100644 ---- a/synapse/handlers/sliding_sync/room_lists.py -+++ b/synapse/handlers/sliding_sync/room_lists.py -@@ -244,14 +244,47 @@ class SlidingSyncRoomLists: - # Note: this won't include rooms the user has left themselves. We add back - # `newly_left` rooms below. This is more efficient than fetching all rooms and - # then filtering out the old left rooms. -- room_membership_for_user_map = await self.store.get_sliding_sync_rooms_for_user( -- user_id -+ room_membership_for_user_map = ( -+ await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots( -+ user_id -+ ) -+ ) -+ # To play nice with the rewind logic below, we need to go fetch the rooms the -+ # user has left themselves but only if it changed after the `to_token`. -+ # -+ # If a leave happens *after* the token range, we may have still been joined (or -+ # any non-self-leave which is relevant to sync) to the room before so we need to -+ # include it in the list of potentially relevant rooms and apply our rewind -+ # logic (outside of this function) to see if it's actually relevant. -+ # -+ # We do this separately from -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` as those results -+ # are cached and the `to_token` isn't very cache friendly (people are constantly -+ # requesting with new tokens) so we separate it out here. -+ self_leave_room_membership_for_user_map = ( -+ await self.store.get_sliding_sync_self_leave_rooms_after_to_token( -+ user_id, to_token -+ ) - ) -+ if self_leave_room_membership_for_user_map: -+ # FIXME: It would be nice to avoid this copy but since -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it -+ # can't return a mutable value like a `dict`. We make the copy to get a -+ # mutable dict that we can change. We try to only make a copy when necessary -+ # (if we actually need to change something) as in most cases, the logic -+ # doesn't need to run. -+ room_membership_for_user_map = dict(room_membership_for_user_map) -+ room_membership_for_user_map.update(self_leave_room_membership_for_user_map) - - # Remove invites from ignored users - ignored_users = await self.store.ignored_users(user_id) - if ignored_users: -- # TODO: It would be nice to avoid these copies -+ # FIXME: It would be nice to avoid this copy but since -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it -+ # can't return a mutable value like a `dict`. We make the copy to get a -+ # mutable dict that we can change. We try to only make a copy when necessary -+ # (if we actually need to change something) as in most cases, the logic -+ # doesn't need to run. 
- room_membership_for_user_map = dict(room_membership_for_user_map) - # Make a copy so we don't run into an error: `dictionary changed size during - # iteration`, when we remove items -@@ -263,11 +296,23 @@ class SlidingSyncRoomLists: - ): - room_membership_for_user_map.pop(room_id, None) - -+ ( -+ newly_joined_room_ids, -+ newly_left_room_map, -+ ) = await self._get_newly_joined_and_left_rooms( -+ user_id, from_token=from_token, to_token=to_token -+ ) -+ - changes = await self._get_rewind_changes_to_current_membership_to_token( - sync_config.user, room_membership_for_user_map, to_token=to_token - ) - if changes: -- # TODO: It would be nice to avoid these copies -+ # FIXME: It would be nice to avoid this copy but since -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it -+ # can't return a mutable value like a `dict`. We make the copy to get a -+ # mutable dict that we can change. We try to only make a copy when necessary -+ # (if we actually need to change something) as in most cases, the logic -+ # doesn't need to run. - room_membership_for_user_map = dict(room_membership_for_user_map) - for room_id, change in changes.items(): - if change is None: -@@ -278,7 +323,7 @@ class SlidingSyncRoomLists: - existing_room = room_membership_for_user_map.get(room_id) - if existing_room is not None: - # Update room membership events to the point in time of the `to_token` -- room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( -+ room_for_user = RoomsForUserSlidingSync( - room_id=room_id, - sender=change.sender, - membership=change.membership, -@@ -290,18 +335,18 @@ class SlidingSyncRoomLists: - room_type=existing_room.room_type, - is_encrypted=existing_room.is_encrypted, - ) -- -- ( -- newly_joined_room_ids, -- newly_left_room_map, -- ) = await self._get_newly_joined_and_left_rooms( -- user_id, from_token=from_token, to_token=to_token -- ) -- dm_room_ids = await self._get_dm_rooms_for_user(user_id) -+ if filter_membership_for_sync( -+ user_id=user_id, -+ room_membership_for_user=room_for_user, -+ newly_left=room_id in newly_left_room_map, -+ ): -+ room_membership_for_user_map[room_id] = room_for_user -+ else: -+ room_membership_for_user_map.pop(room_id, None) - - # Add back `newly_left` rooms (rooms left in the from -> to token range). - # -- # We do this because `get_sliding_sync_rooms_for_user(...)` doesn't include -+ # We do this because `get_sliding_sync_rooms_for_user_from_membership_snapshots(...)` doesn't include - # rooms that the user left themselves as it's more efficient to add them back - # here than to fetch all rooms and then filter out the old left rooms. The user - # only leaves a room once in a blue moon so this barely needs to run. -@@ -310,7 +355,12 @@ class SlidingSyncRoomLists: - newly_left_room_map.keys() - room_membership_for_user_map.keys() - ) - if missing_newly_left_rooms: -- # TODO: It would be nice to avoid these copies -+ # FIXME: It would be nice to avoid this copy but since -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it -+ # can't return a mutable value like a `dict`. We make the copy to get a -+ # mutable dict that we can change. We try to only make a copy when necessary -+ # (if we actually need to change something) as in most cases, the logic -+ # doesn't need to run. 
- room_membership_for_user_map = dict(room_membership_for_user_map) - for room_id in missing_newly_left_rooms: - newly_left_room_for_user = newly_left_room_map[room_id] -@@ -327,14 +377,21 @@ class SlidingSyncRoomLists: - # If the membership exists, it's just a normal user left the room on - # their own - if newly_left_room_for_user_sliding_sync is not None: -- room_membership_for_user_map[room_id] = ( -- newly_left_room_for_user_sliding_sync -- ) -+ if filter_membership_for_sync( -+ user_id=user_id, -+ room_membership_for_user=newly_left_room_for_user_sliding_sync, -+ newly_left=room_id in newly_left_room_map, -+ ): -+ room_membership_for_user_map[room_id] = ( -+ newly_left_room_for_user_sliding_sync -+ ) -+ else: -+ room_membership_for_user_map.pop(room_id, None) - - change = changes.get(room_id) - if change is not None: - # Update room membership events to the point in time of the `to_token` -- room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( -+ room_for_user = RoomsForUserSlidingSync( - room_id=room_id, - sender=change.sender, - membership=change.membership, -@@ -346,6 +403,14 @@ class SlidingSyncRoomLists: - room_type=newly_left_room_for_user_sliding_sync.room_type, - is_encrypted=newly_left_room_for_user_sliding_sync.is_encrypted, - ) -+ if filter_membership_for_sync( -+ user_id=user_id, -+ room_membership_for_user=room_for_user, -+ newly_left=room_id in newly_left_room_map, -+ ): -+ room_membership_for_user_map[room_id] = room_for_user -+ else: -+ room_membership_for_user_map.pop(room_id, None) - - # If we are `newly_left` from the room but can't find any membership, - # then we have been "state reset" out of the room -@@ -367,7 +432,7 @@ class SlidingSyncRoomLists: - newly_left_room_for_user.event_pos.to_room_stream_token(), - ) - -- room_membership_for_user_map[room_id] = RoomsForUserSlidingSync( -+ room_for_user = RoomsForUserSlidingSync( - room_id=room_id, - sender=newly_left_room_for_user.sender, - membership=newly_left_room_for_user.membership, -@@ -378,6 +443,16 @@ class SlidingSyncRoomLists: - room_type=room_type, - is_encrypted=is_encrypted, - ) -+ if filter_membership_for_sync( -+ user_id=user_id, -+ room_membership_for_user=room_for_user, -+ newly_left=room_id in newly_left_room_map, -+ ): -+ room_membership_for_user_map[room_id] = room_for_user -+ else: -+ room_membership_for_user_map.pop(room_id, None) -+ -+ dm_room_ids = await self._get_dm_rooms_for_user(user_id) - - if sync_config.lists: - sync_room_map = room_membership_for_user_map -@@ -493,7 +568,12 @@ class SlidingSyncRoomLists: - - if sync_config.room_subscriptions: - with start_active_span("assemble_room_subscriptions"): -- # TODO: It would be nice to avoid these copies -+ # FIXME: It would be nice to avoid this copy but since -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it -+ # can't return a mutable value like a `dict`. We make the copy to get a -+ # mutable dict that we can change. We try to only make a copy when necessary -+ # (if we actually need to change something) as in most cases, the logic -+ # doesn't need to run. 
- room_membership_for_user_map = dict(room_membership_for_user_map) - - # Find which rooms are partially stated and may need to be filtered out -diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py -index 7251e72e3a..b5fe7dd858 100644 ---- a/synapse/storage/_base.py -+++ b/synapse/storage/_base.py -@@ -130,7 +130,7 @@ class SQLBaseStore(metaclass=ABCMeta): - "_get_rooms_for_local_user_where_membership_is_inner", (user_id,) - ) - self._attempt_to_invalidate_cache( -- "get_sliding_sync_rooms_for_user", (user_id,) -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", (user_id,) - ) - - # Purge other caches based on room state. -@@ -138,7 +138,9 @@ class SQLBaseStore(metaclass=ABCMeta): - self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,)) - self._attempt_to_invalidate_cache("get_room_type", (room_id,)) - self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) -- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) -+ self._attempt_to_invalidate_cache( -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None -+ ) - - def _invalidate_state_caches_all(self, room_id: str) -> None: - """Invalidates caches that are based on the current state, but does -@@ -168,7 +170,9 @@ class SQLBaseStore(metaclass=ABCMeta): - self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) - self._attempt_to_invalidate_cache("get_room_type", (room_id,)) - self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) -- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) -+ self._attempt_to_invalidate_cache( -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None -+ ) - - def _attempt_to_invalidate_cache( - self, cache_name: str, key: Optional[Collection[Any]] -diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py -index f364464c23..9418fb6dd7 100644 ---- a/synapse/storage/databases/main/cache.py -+++ b/synapse/storage/databases/main/cache.py -@@ -307,7 +307,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - "get_rooms_for_user", (data.state_key,) - ) - self._attempt_to_invalidate_cache( -- "get_sliding_sync_rooms_for_user", None -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None - ) - self._membership_stream_cache.entity_has_changed(data.state_key, token) # type: ignore[attr-defined] - elif data.type == EventTypes.RoomEncryption: -@@ -319,7 +319,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - - if (data.type, data.state_key) in SLIDING_SYNC_RELEVANT_STATE_SET: - self._attempt_to_invalidate_cache( -- "get_sliding_sync_rooms_for_user", None -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None - ) - elif row.type == EventsStreamAllStateRow.TypeId: - assert isinstance(data, EventsStreamAllStateRow) -@@ -330,7 +330,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - self._attempt_to_invalidate_cache("get_rooms_for_user", None) - self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) - self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,)) -- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) -+ self._attempt_to_invalidate_cache( -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None -+ ) - else: - raise Exception("Unknown events stream row type %s" % (row.type,)) - -@@ -394,7 +396,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - "_get_rooms_for_local_user_where_membership_is_inner", (state_key,) - ) - 
self._attempt_to_invalidate_cache( -- "get_sliding_sync_rooms_for_user", (state_key,) -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", -+ (state_key,), - ) - - self._attempt_to_invalidate_cache( -@@ -413,7 +416,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) - - if (etype, state_key) in SLIDING_SYNC_RELEVANT_STATE_SET: -- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) -+ self._attempt_to_invalidate_cache( -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None -+ ) - - if relates_to: - self._attempt_to_invalidate_cache( -@@ -470,7 +475,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - self._attempt_to_invalidate_cache( - "_get_rooms_for_local_user_where_membership_is_inner", None - ) -- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) -+ self._attempt_to_invalidate_cache( -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None -+ ) - self._attempt_to_invalidate_cache("did_forget", None) - self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) - self._attempt_to_invalidate_cache("get_references_for_event", None) -@@ -529,7 +536,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): - self._attempt_to_invalidate_cache( - "get_current_hosts_in_room_ordered", (room_id,) - ) -- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None) -+ self._attempt_to_invalidate_cache( -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None -+ ) - self._attempt_to_invalidate_cache("did_forget", None) - self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) - self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) -diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py -index b8c78baa6c..2084776543 100644 ---- a/synapse/storage/databases/main/roommember.py -+++ b/synapse/storage/databases/main/roommember.py -@@ -53,6 +53,7 @@ from synapse.storage.database import ( - ) - from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore - from synapse.storage.databases.main.events_worker import EventsWorkerStore -+from synapse.storage.databases.main.stream import _filter_results_by_stream - from synapse.storage.engines import Sqlite3Engine - from synapse.storage.roommember import ( - MemberSummary, -@@ -65,6 +66,7 @@ from synapse.types import ( - PersistedEventPosition, - StateMap, - StrCollection, -+ StreamToken, - get_domain_from_id, - ) - from synapse.util.caches.descriptors import _CacheContext, cached, cachedList -@@ -1389,7 +1391,9 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): - txn, self.get_forgotten_rooms_for_user, (user_id,) - ) - self._invalidate_cache_and_stream( -- txn, self.get_sliding_sync_rooms_for_user, (user_id,) -+ txn, -+ self.get_sliding_sync_rooms_for_user_from_membership_snapshots, -+ (user_id,), - ) - - await self.db_pool.runInteraction("forget_membership", f) -@@ -1421,25 +1425,30 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): - ) - - @cached(iterable=True, max_entries=10000) -- async def get_sliding_sync_rooms_for_user( -- self, -- user_id: str, -+ async def get_sliding_sync_rooms_for_user_from_membership_snapshots( -+ self, user_id: str - ) -> Mapping[str, RoomsForUserSlidingSync]: -- """Get all the rooms for a user to handle a sliding sync request. 
-+ """ -+ Get all the rooms for a user to handle a sliding sync request from the -+ `sliding_sync_membership_snapshots` table. These will be current memberships and -+ need to be rewound to the token range. - - Ignores forgotten rooms and rooms that the user has left themselves. - -+ Args: -+ user_id: The user ID to get the rooms for. -+ - Returns: - Map from room ID to membership info - """ - -- def get_sliding_sync_rooms_for_user_txn( -+ def _txn( - txn: LoggingTransaction, - ) -> Dict[str, RoomsForUserSlidingSync]: - # XXX: If you use any new columns that can change (like from - # `sliding_sync_joined_rooms` or `forgotten`), make sure to bust the -- # `get_sliding_sync_rooms_for_user` cache in the appropriate places (and add -- # tests). -+ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` cache in the -+ # appropriate places (and add tests). - sql = """ - SELECT m.room_id, m.sender, m.membership, m.membership_event_id, - r.room_version, -@@ -1455,6 +1464,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): - AND (m.membership != 'leave' OR m.user_id != m.sender) - """ - txn.execute(sql, (user_id,)) -+ - return { - row[0]: RoomsForUserSlidingSync( - room_id=row[0], -@@ -1475,8 +1485,113 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): - } - - return await self.db_pool.runInteraction( -- "get_sliding_sync_rooms_for_user", -- get_sliding_sync_rooms_for_user_txn, -+ "get_sliding_sync_rooms_for_user_from_membership_snapshots", -+ _txn, -+ ) -+ -+ async def get_sliding_sync_self_leave_rooms_after_to_token( -+ self, -+ user_id: str, -+ to_token: StreamToken, -+ ) -> Dict[str, RoomsForUserSlidingSync]: -+ """ -+ Get all the self-leave rooms for a user after the `to_token` (outside the token -+ range) that are potentially relevant[1] and needed to handle a sliding sync -+ request. The results are from the `sliding_sync_membership_snapshots` table and -+ will be current memberships and need to be rewound to the token range. -+ -+ [1] If a leave happens after the token range, we may have still been joined (or -+ any non-self-leave which is relevant to sync) to the room before so we need to -+ include it in the list of potentially relevant rooms and apply -+ our rewind logic (outside of this function) to see if it's actually relevant. -+ -+ This is basically a sister-function to -+ `get_sliding_sync_rooms_for_user_from_membership_snapshots`. We could -+ alternatively incorporate this logic into -+ `get_sliding_sync_rooms_for_user_from_membership_snapshots` but those results -+ are cached and the `to_token` isn't very cache friendly (people are constantly -+ requesting with new tokens) so we separate it out here. -+ -+ Args: -+ user_id: The user ID to get the rooms for. -+ to_token: Any self-leave memberships after this position will be returned. -+ -+ Returns: -+ Map from room ID to membership info -+ """ -+ # TODO: Potential to check -+ # `self._membership_stream_cache.has_entity_changed(...)` as an early-return -+ # shortcut. -+ -+ def _txn( -+ txn: LoggingTransaction, -+ ) -> Dict[str, RoomsForUserSlidingSync]: -+ sql = """ -+ SELECT m.room_id, m.sender, m.membership, m.membership_event_id, -+ r.room_version, -+ m.event_instance_name, m.event_stream_ordering, -+ m.has_known_state, -+ m.room_type, -+ m.is_encrypted -+ FROM sliding_sync_membership_snapshots AS m -+ INNER JOIN rooms AS r USING (room_id) -+ WHERE user_id = ? 
-+ AND m.forgotten = 0 -+ AND m.membership = 'leave' -+ AND m.user_id = m.sender -+ AND (m.event_stream_ordering > ?) -+ """ -+ # If a leave happens after the token range, we may have still been joined -+ # (or any non-self-leave which is relevant to sync) to the room before so we -+ # need to include it in the list of potentially relevant rooms and apply our -+ # rewind logic (outside of this function). -+ # -+ # To handle tokens with a non-empty instance_map we fetch more -+ # results than necessary and then filter down -+ min_to_token_position = to_token.room_key.stream -+ txn.execute(sql, (user_id, min_to_token_position)) -+ -+ # Map from room_id to membership info -+ room_membership_for_user_map: Dict[str, RoomsForUserSlidingSync] = {} -+ for row in txn: -+ room_for_user = RoomsForUserSlidingSync( -+ room_id=row[0], -+ sender=row[1], -+ membership=row[2], -+ event_id=row[3], -+ room_version_id=row[4], -+ event_pos=PersistedEventPosition(row[5], row[6]), -+ has_known_state=bool(row[7]), -+ room_type=row[8], -+ is_encrypted=bool(row[9]), -+ ) -+ -+ # We filter out unknown room versions proactively. They shouldn't go -+ # down sync and their metadata may be in a broken state (causing -+ # errors). -+ if row[4] not in KNOWN_ROOM_VERSIONS: -+ continue -+ -+ # We only want to include the self-leave membership if it happened after -+ # the token range. -+ # -+ # Since the database pulls out more than necessary, we need to filter it -+ # down here. -+ if _filter_results_by_stream( -+ lower_token=None, -+ upper_token=to_token.room_key, -+ instance_name=room_for_user.event_pos.instance_name, -+ stream_ordering=room_for_user.event_pos.stream, -+ ): -+ continue -+ -+ room_membership_for_user_map[room_for_user.room_id] = room_for_user -+ -+ return room_membership_for_user_map -+ -+ return await self.db_pool.runInteraction( -+ "get_sliding_sync_self_leave_rooms_after_to_token", -+ _txn, - ) - - async def get_sliding_sync_room_for_user( -diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py -index 00e5208674..c52389b8a9 100644 ---- a/synapse/storage/databases/main/stream.py -+++ b/synapse/storage/databases/main/stream.py -@@ -453,6 +453,8 @@ def _filter_results_by_stream( - stream_ordering falls between the two tokens (taking a None - token to mean unbounded). - -+ The token range is defined by > `lower_token` and <= `upper_token`. -+ - Used to filter results from fetching events in the DB against the given - tokens. 
This is necessary to handle the case where the tokens include - position maps, which we handle by fetching more than necessary from the DB -diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py -index 5b7e2937f8..cbacf21ae7 100644 ---- a/tests/handlers/test_sliding_sync.py -+++ b/tests/handlers/test_sliding_sync.py -@@ -22,7 +22,7 @@ from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple - from unittest.mock import patch - - import attr --from parameterized import parameterized -+from parameterized import parameterized, parameterized_class - - from twisted.test.proto_helpers import MemoryReactor - -@@ -43,13 +43,15 @@ from synapse.rest import admin - from synapse.rest.client import knock, login, room - from synapse.server import HomeServer - from synapse.storage.util.id_generators import MultiWriterIdGenerator --from synapse.types import JsonDict, StateMap, StreamToken, UserID --from synapse.types.handlers.sliding_sync import SlidingSyncConfig -+from synapse.types import JsonDict, StateMap, StreamToken, UserID, create_requester -+from synapse.types.handlers.sliding_sync import PerConnectionState, SlidingSyncConfig - from synapse.types.state import StateFilter - from synapse.util import Clock - - from tests import unittest - from tests.replication._base import BaseMultiWorkerStreamTestCase -+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase -+from tests.test_utils.event_injection import create_event - from tests.unittest import HomeserverTestCase, TestCase - - logger = logging.getLogger(__name__) -@@ -572,9 +574,23 @@ class RoomSyncConfigTestCase(TestCase): - self._assert_room_config_equal(combined_config, expected, "A into B") - - --class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): -+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the -+# foreground update for -+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by -+# https://github.com/element-hq/synapse/issues/17623) -+@parameterized_class( -+ ("use_new_tables",), -+ [ -+ (True,), -+ (False,), -+ ], -+ class_name_func=lambda cls, -+ num, -+ params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", -+) -+class ComputeInterestedRoomsTestCase(SlidingSyncBase): - """ -- Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it returns -+ Tests Sliding Sync handler `compute_interested_rooms()` to make sure it returns - the correct list of rooms IDs. 
- """ - -@@ -596,6 +612,11 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - self.store = self.hs.get_datastores().main - self.event_sources = hs.get_event_sources() - self.storage_controllers = hs.get_storage_controllers() -+ persistence = self.hs.get_storage_controllers().persistence -+ assert persistence is not None -+ self.persistence = persistence -+ -+ super().prepare(reactor, clock, hs) - - def test_no_rooms(self) -> None: - """ -@@ -606,15 +627,28 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - now_token = self.event_sources.get_current_token() - -- room_id_results, _, _ = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=now_token, - to_token=now_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - -- self.assertEqual(room_id_results.keys(), set()) -+ self.assertIncludes(room_id_results, set(), exact=True) - - def test_get_newly_joined_room(self) -> None: - """ -@@ -633,22 +667,44 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_room_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room_token, - to_token=after_room_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - -- self.assertEqual(room_id_results.keys(), {room_id}) -+ self.assertIncludes( -+ room_id_results, -+ {room_id}, -+ exact=True, -+ ) - # It should be pointing to the join event (latest membership event in the - # from/to range) - self.assertEqual( -- room_id_results[room_id].event_id, -+ interested_rooms.room_membership_for_user_map[room_id].event_id, - join_response["event_id"], - ) -- self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id].membership, -+ Membership.JOIN, -+ ) - # We should be considered `newly_joined` because we joined during the token - # range - self.assertTrue(room_id in newly_joined) -@@ -668,22 +724,40 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_room_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ 
self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room_token, - to_token=after_room_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - -- self.assertEqual(room_id_results.keys(), {room_id}) -+ self.assertIncludes(room_id_results, {room_id}, exact=True) - # It should be pointing to the join event (latest membership event in the - # from/to range) - self.assertEqual( -- room_id_results[room_id].event_id, -+ interested_rooms.room_membership_for_user_map[room_id].event_id, - join_response["event_id"], - ) -- self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id].membership, -+ Membership.JOIN, -+ ) - # We should *NOT* be `newly_joined` because we joined before the token range - self.assertTrue(room_id not in newly_joined) - self.assertTrue(room_id not in newly_left) -@@ -742,46 +816,71 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_room_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room_token, - to_token=after_room_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Ensure that the invited, ban, and knock rooms show up -- self.assertEqual( -- room_id_results.keys(), -+ self.assertIncludes( -+ room_id_results, - { - invited_room_id, - ban_room_id, - knock_room_id, - }, -+ exact=True, - ) - # It should be pointing to the the respective membership event (latest - # membership event in the from/to range) - self.assertEqual( -- room_id_results[invited_room_id].event_id, -+ interested_rooms.room_membership_for_user_map[invited_room_id].event_id, - invite_response["event_id"], - ) -- self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[invited_room_id].membership, -+ Membership.INVITE, -+ ) - self.assertTrue(invited_room_id not in newly_joined) - self.assertTrue(invited_room_id not in newly_left) - - self.assertEqual( -- room_id_results[ban_room_id].event_id, -+ interested_rooms.room_membership_for_user_map[ban_room_id].event_id, - ban_response["event_id"], - ) -- self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[ban_room_id].membership, -+ Membership.BAN, -+ ) - 
self.assertTrue(ban_room_id not in newly_joined) - self.assertTrue(ban_room_id not in newly_left) - - self.assertEqual( -- room_id_results[knock_room_id].event_id, -+ interested_rooms.room_membership_for_user_map[knock_room_id].event_id, - knock_room_membership_state_event.event_id, - ) -- self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[knock_room_id].membership, -+ Membership.KNOCK, -+ ) - self.assertTrue(knock_room_id not in newly_joined) - self.assertTrue(knock_room_id not in newly_left) - -@@ -814,23 +913,43 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_kick_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_kick_token, - to_token=after_kick_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # The kicked room should show up -- self.assertEqual(room_id_results.keys(), {kick_room_id}) -+ self.assertIncludes(room_id_results, {kick_room_id}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[kick_room_id].event_id, -+ interested_rooms.room_membership_for_user_map[kick_room_id].event_id, - kick_response["event_id"], - ) -- self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE) -- self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[kick_room_id].membership, -+ Membership.LEAVE, -+ ) -+ self.assertNotEqual( -+ interested_rooms.room_membership_for_user_map[kick_room_id].sender, user1_id -+ ) - # We should *NOT* be `newly_joined` because we were not joined at the the time - # of the `to_token`. 
- self.assertTrue(kick_room_id not in newly_joined) -@@ -907,16 +1026,29 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - ) - self.assertEqual(channel.code, 200, channel.result) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room_forgets, - to_token=before_room_forgets, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - - # We shouldn't see the room because it was forgotten -- self.assertEqual(room_id_results.keys(), set()) -+ self.assertIncludes(room_id_results, set(), exact=True) - - def test_newly_left_rooms(self) -> None: - """ -@@ -927,7 +1059,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - # Leave before we calculate the `from_token` - room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) -- leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok) -+ _leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - - after_room1_token = self.event_sources.get_current_token() - -@@ -937,31 +1069,52 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_room2_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room2_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - -- self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) -- -- self.assertEqual( -- room_id_results[room_id1].event_id, -- leave_response1["event_id"], -+ # `room_id1` should not show up because it was left before the token range. -+ # `room_id2` should show up because it is `newly_left` within the token range. 
-+ self.assertIncludes( -+ room_id_results, -+ {room_id2}, -+ exact=True, -+ message="Corresponding map to disambiguate the opaque room IDs: " -+ + str( -+ { -+ "room_id1": room_id1, -+ "room_id2": room_id2, -+ } -+ ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -- # We should *NOT* be `newly_joined` or `newly_left` because that happened before -- # the from/to range -- self.assertTrue(room_id1 not in newly_joined) -- self.assertTrue(room_id1 not in newly_left) - - self.assertEqual( -- room_id_results[room_id2].event_id, -+ interested_rooms.room_membership_for_user_map[room_id2].event_id, - leave_response2["event_id"], - ) -- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id2].membership, -+ Membership.LEAVE, -+ ) - # We should *NOT* be `newly_joined` because we are instead `newly_left` - self.assertTrue(room_id2 not in newly_joined) - self.assertTrue(room_id2 in newly_left) -@@ -987,21 +1140,39 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id2, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response1["event_id"], - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id1 in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -1027,20 +1198,35 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Leave the room after we already have our tokens - leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, 
-+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # We should still see the room because we were joined during the - # from_token/to_token time period. -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1050,7 +1236,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id1 in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -1074,19 +1263,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Leave the room after we already have our tokens - leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # We should still see the room because we were joined before the `from_token` -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1096,7 +1300,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should *NOT* be `newly_joined` because we joined before the token range - self.assertTrue(room_id1 not in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -1138,19 +1345,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - join_response2 = self.helper.join(kick_room_id, user1_id, tok=user1_tok) - leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok) - -- 
room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_kick_token, - to_token=after_kick_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # We shouldn't see the room because it was forgotten -- self.assertEqual(room_id_results.keys(), {kick_room_id}) -+ self.assertIncludes(room_id_results, {kick_room_id}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[kick_room_id].event_id, -+ interested_rooms.room_membership_for_user_map[kick_room_id].event_id, - kick_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1162,8 +1384,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE) -- self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[kick_room_id].membership, -+ Membership.LEAVE, -+ ) -+ self.assertNotEqual( -+ interested_rooms.room_membership_for_user_map[kick_room_id].sender, user1_id -+ ) - # We should *NOT* be `newly_joined` because we were kicked - self.assertTrue(kick_room_id not in newly_joined) - self.assertTrue(kick_room_id not in newly_left) -@@ -1194,19 +1421,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should still show up because it's newly_left during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - leave_response1["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( 
-@@ -1218,7 +1460,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.LEAVE, -+ ) - # We should *NOT* be `newly_joined` because we are actually `newly_left` during - # the token range - self.assertTrue(room_id1 not in newly_joined) -@@ -1249,19 +1494,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Join the room after we already have our tokens - join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should still show up because it's newly_left during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - leave_response1["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1272,7 +1532,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.LEAVE, -+ ) - # We should *NOT* be `newly_joined` because we are actually `newly_left` during - # the token range - self.assertTrue(room_id1 not in newly_joined) -@@ -1301,47 +1564,53 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - # Join and leave the room2 before the `to_token` - self.helper.join(room_id2, user1_id, tok=user1_tok) -- leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) -+ _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) - - after_room1_token = self.event_sources.get_current_token() - - # Join the room2 after we already have our tokens - self.helper.join(room_id2, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ 
PerConnectionState(), - from_token=None, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Only rooms we were joined to before the `to_token` should show up -- self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - - # Room1 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response1["event_id"], - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -- # We should *NOT* be `newly_joined`/`newly_left` because there is no -- # `from_token` to define a "live" range to compare against -- self.assertTrue(room_id1 not in newly_joined) -- self.assertTrue(room_id1 not in newly_left) -- -- # Room2 -- # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id2].event_id, -- leave_response2["event_id"], -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, - ) -- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) - # We should *NOT* be `newly_joined`/`newly_left` because there is no - # `from_token` to define a "live" range to compare against -- self.assertTrue(room_id2 not in newly_joined) -- self.assertTrue(room_id2 not in newly_left) -+ self.assertTrue(room_id1 not in newly_joined) -+ self.assertTrue(room_id1 not in newly_left) - - def test_from_token_ahead_of_to_token(self) -> None: - """ -@@ -1365,7 +1634,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - # Join and leave the room2 before `to_token` - _join_room2_response1 = self.helper.join(room_id2, user1_id, tok=user1_tok) -- leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok) -+ _leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok) - - # Note: These are purposely swapped. The `from_token` should come after - # the `to_token` in this test -@@ -1390,55 +1659,70 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Join the room4 after we already have our tokens - self.helper.join(room_id4, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=from_token, - to_token=to_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # In the "current" state snapshot, we're joined to all of the rooms but in the - # from/to token range... 
- self.assertIncludes( -- room_id_results.keys(), -+ room_id_results, - { - # Included because we were joined before both tokens - room_id1, -- # Included because we had membership before the to_token -- room_id2, -+ # Excluded because we left before the `from_token` and `to_token` -+ # room_id2, - # Excluded because we joined after the `to_token` - # room_id3, - # Excluded because we joined after the `to_token` - # room_id4, - }, - exact=True, -+ message="Corresponding map to disambiguate the opaque room IDs: " -+ + str( -+ { -+ "room_id1": room_id1, -+ "room_id2": room_id2, -+ "room_id3": room_id3, -+ "room_id4": room_id4, -+ } -+ ), - ) - - # Room1 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_room1_response1["event_id"], - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1` - # before either of the tokens - self.assertTrue(room_id1 not in newly_joined) - self.assertTrue(room_id1 not in newly_left) - -- # Room2 -- # It should be pointing to the latest membership event in the from/to range -- self.assertEqual( -- room_id_results[room_id2].event_id, -- leave_room2_response1["event_id"], -- ) -- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) -- # We should *NOT* be `newly_joined`/`newly_left` because we joined and left -- # `room1` before either of the tokens -- self.assertTrue(room_id2 not in newly_joined) -- self.assertTrue(room_id2 not in newly_left) -- - def test_leave_before_range_and_join_leave_after_to_token(self) -> None: - """ - Test old left rooms. 
But we're also testing that joining and leaving after the -@@ -1455,7 +1739,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) - # Join and leave the room before the from/to range - self.helper.join(room_id1, user1_id, tok=user1_tok) -- leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) -+ self.helper.leave(room_id1, user1_id, tok=user1_tok) - - after_room1_token = self.event_sources.get_current_token() - -@@ -1463,25 +1747,28 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - self.helper.join(room_id1, user1_id, tok=user1_tok) - self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - -- self.assertEqual(room_id_results.keys(), {room_id1}) -- # It should be pointing to the latest membership event in the from/to range -- self.assertEqual( -- room_id_results[room_id1].event_id, -- leave_response["event_id"], -- ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -- # We should *NOT* be `newly_joined`/`newly_left` because we joined and left -- # `room1` before either of the tokens -- self.assertTrue(room_id1 not in newly_joined) -- self.assertTrue(room_id1 not in newly_left) -+ self.assertIncludes(room_id_results, set(), exact=True) - - def test_leave_before_range_and_join_after_to_token(self) -> None: - """ -@@ -1499,32 +1786,35 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) - # Join and leave the room before the from/to range - self.helper.join(room_id1, user1_id, tok=user1_tok) -- leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) -+ self.helper.leave(room_id1, user1_id, tok=user1_tok) - - after_room1_token = self.event_sources.get_current_token() - - # Join the room after we already have our tokens - self.helper.join(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - -- self.assertEqual(room_id_results.keys(), {room_id1}) -- # It should be pointing to the latest membership event in 
the from/to range -- self.assertEqual( -- room_id_results[room_id1].event_id, -- leave_response["event_id"], -- ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -- # We should *NOT* be `newly_joined`/`newly_left` because we joined and left -- # `room1` before either of the tokens -- self.assertTrue(room_id1 not in newly_joined) -- self.assertTrue(room_id1 not in newly_left) -+ self.assertIncludes(room_id_results, set(), exact=True) - - def test_join_leave_multiple_times_during_range_and_after_to_token( - self, -@@ -1556,19 +1846,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because it was newly_left and joined during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response2["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1582,7 +1887,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id1 in newly_joined) - # We should *NOT* be `newly_left` because we joined during the token range and -@@ -1618,19 +1926,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = 
set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were joined before the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response2["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1644,7 +1967,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should *NOT* be `newly_joined` because we joined before the token range - self.assertTrue(room_id1 not in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -1677,19 +2003,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - join_respsonse = self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were invited before the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - invite_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1700,7 +2041,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.INVITE, -+ ) - # We should *NOT* be `newly_joined` because we were only invited before the - # token range - self.assertTrue(room_id1 not in newly_joined) -@@ -1751,19 +2095,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - tok=user1_tok, - ) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ 
SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were joined during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - displayname_change_during_token_range_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1778,7 +2137,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id1 in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -1816,19 +2178,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_change1_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_change1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were joined during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - displayname_change_during_token_range_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1840,7 +2217,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should *NOT* be `newly_joined` because we joined before the token range - self.assertTrue(room_id1 not in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -1888,19 +2268,34 
@@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - tok=user1_tok, - ) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were joined before the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - displayname_change_before_token_range_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1915,18 +2310,22 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should *NOT* be `newly_joined` because we joined before the token range - self.assertTrue(room_id1 not in newly_joined) - self.assertTrue(room_id1 not in newly_left) - -- def test_display_name_changes_leave_after_token_range( -+ def test_newly_joined_display_name_changes_leave_after_token_range( - self, - ) -> None: - """ - Test that we point to the correct membership event within the from/to range even -- if there are multiple `join` membership events in a row indicating -- `displayname`/`avatar_url` updates and we leave after the `to_token`. -+ if we are `newly_joined` and there are multiple `join` membership events in a -+ row indicating `displayname`/`avatar_url` updates and we leave after the -+ `to_token`. - - See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method. - """ -@@ -1941,6 +2340,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # leave and can still re-join. 
- room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) - join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) -+ - # Update the displayname during the token range - displayname_change_during_token_range_response = self.helper.send_state( - room_id1, -@@ -1970,19 +2370,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Leave after the token - self.helper.leave(room_id1, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were joined during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - displayname_change_during_token_range_response["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -1997,11 +2412,118 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id1 in newly_joined) - self.assertTrue(room_id1 not in newly_left) - -+ def test_display_name_changes_leave_after_token_range( -+ self, -+ ) -> None: -+ """ -+ Test that we point to the correct membership event within the from/to range even -+ if there are multiple `join` membership events in a row indicating -+ `displayname`/`avatar_url` updates and we leave after the `to_token`. -+ -+ See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method. -+ """ -+ user1_id = self.register_user("user1", "pass") -+ user1_tok = self.login(user1_id, "pass") -+ user2_id = self.register_user("user2", "pass") -+ user2_tok = self.login(user2_id, "pass") -+ -+ _before_room1_token = self.event_sources.get_current_token() -+ -+ # We create the room with user2 so the room isn't left with no members when we -+ # leave and can still re-join. 
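Each migrated test above repeats the same request-and-extract boilerplate: build a `SlidingSyncConfig` with a single `foo-list` sliding list, call `compute_interested_rooms(...)` with an empty `PerConnectionState()`, then derive `room_id_results`, `newly_joined` and `newly_left` from the returned `interested_rooms`. A minimal sketch of that extraction step is below; the dataclasses are hypothetical stand-ins for Synapse's real result types, included only so the snippet is self-contained.

```python
from dataclasses import dataclass, field
from typing import AbstractSet, Dict, List, Set, Tuple


# Hypothetical stand-ins for the result types the tests above read from.
@dataclass
class ListOp:
    room_ids: List[str]


@dataclass
class ListResult:
    ops: List[ListOp]


@dataclass
class InterestedRooms:
    lists: Dict[str, ListResult]
    newly_joined_rooms: AbstractSet[str] = field(default_factory=set)
    newly_left_rooms: AbstractSet[str] = field(default_factory=set)


def extract_list_results(
    interested_rooms: InterestedRooms, list_name: str = "foo-list"
) -> Tuple[Set[str], AbstractSet[str], AbstractSet[str]]:
    """Mirror the test boilerplate: room IDs from the first list op,
    plus the newly joined / newly left sets."""
    room_id_results = set(interested_rooms.lists[list_name].ops[0].room_ids)
    return (
        room_id_results,
        interested_rooms.newly_joined_rooms,
        interested_rooms.newly_left_rooms,
    )


# Example: two rooms in the list window, one of them newly joined.
result = InterestedRooms(
    lists={"foo-list": ListResult(ops=[ListOp(room_ids=["!a:test", "!b:test"])])},
    newly_joined_rooms={"!a:test"},
)
room_ids, newly_joined, newly_left = extract_list_results(result)
assert room_ids == {"!a:test", "!b:test"}
assert "!a:test" in newly_joined and not newly_left
```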
-+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) -+ join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) -+ -+ after_join_token = self.event_sources.get_current_token() -+ -+ # Update the displayname during the token range -+ displayname_change_during_token_range_response = self.helper.send_state( -+ room_id1, -+ event_type=EventTypes.Member, -+ state_key=user1_id, -+ body={ -+ "membership": Membership.JOIN, -+ "displayname": "displayname during token range", -+ }, -+ tok=user1_tok, -+ ) -+ -+ after_display_name_change_token = self.event_sources.get_current_token() -+ -+ # Update the displayname after the token range -+ displayname_change_after_token_range_response = self.helper.send_state( -+ room_id1, -+ event_type=EventTypes.Member, -+ state_key=user1_id, -+ body={ -+ "membership": Membership.JOIN, -+ "displayname": "displayname after token range", -+ }, -+ tok=user1_tok, -+ ) -+ -+ # Leave after the token -+ self.helper.leave(room_id1, user1_id, tok=user1_tok) -+ -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), -+ from_token=after_join_token, -+ to_token=after_display_name_change_token, -+ ) -+ ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms -+ -+ # Room should show up because we were joined during the from/to range -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) -+ # It should be pointing to the latest membership event in the from/to range -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, -+ displayname_change_during_token_range_response["event_id"], -+ "Corresponding map to disambiguate the opaque event IDs: " -+ + str( -+ { -+ "join_response": join_response["event_id"], -+ "displayname_change_during_token_range_response": displayname_change_during_token_range_response[ -+ "event_id" -+ ], -+ "displayname_change_after_token_range_response": displayname_change_after_token_range_response[ -+ "event_id" -+ ], -+ } -+ ), -+ ) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) -+ # We only changed our display name during the token range so we shouldn't be -+ # considered `newly_joined` or `newly_left` -+ self.assertTrue(room_id1 not in newly_joined) -+ self.assertTrue(room_id1 not in newly_left) -+ - def test_display_name_changes_join_after_token_range( - self, - ) -> None: -@@ -2038,16 +2560,29 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - tok=user1_tok, - ) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ 
PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - - # Room shouldn't show up because we joined after the from/to range -- self.assertEqual(room_id_results.keys(), set()) -+ self.assertIncludes(room_id_results, set(), exact=True) - - def test_newly_joined_with_leave_join_in_token_range( - self, -@@ -2074,22 +2609,40 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_more_changes_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=after_room1_token, - to_token=after_more_changes_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because we were joined during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_response2["event_id"], - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be considered `newly_joined` because there is some non-join event in - # between our latest join event. 
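The `newly_joined` assertions in these tests pin down a small decision rule: a join that happened before the token range (even with displayname-only re-joins inside it) is not newly joined, while a first join inside the range, or a leave followed by a re-join inside the range, is. A rough, self-contained model of that rule, assuming plain membership strings rather than Synapse's real event objects:

```python
from typing import Sequence

# Plain strings standing in for Membership.JOIN / Membership.LEAVE.
JOIN, LEAVE = "join", "leave"


def is_newly_joined(
    events_before_range: Sequence[str],
    events_in_range: Sequence[str],
) -> bool:
    """Rough model of the behaviour the assertions above pin down."""
    last_before = events_before_range[-1] if events_before_range else None
    last_overall = events_in_range[-1] if events_in_range else last_before
    if last_overall != JOIN:
        # Not joined at the end of the range at all.
        return False
    if last_before != JOIN:
        # First joined inside the range.
        return True
    # Already joined at the start: only "newly joined" if some non-join
    # membership (e.g. a leave) appears between joins inside the range.
    return any(m != JOIN for m in events_in_range)


# The scenarios exercised by the surrounding tests:
assert is_newly_joined([], [JOIN]) is True            # joined during the range
assert is_newly_joined([JOIN], [JOIN, JOIN]) is False  # displayname-only changes
assert is_newly_joined([JOIN], [LEAVE, JOIN]) is True  # leave + re-join in range
```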
- self.assertTrue(room_id1 in newly_joined) -@@ -2139,19 +2692,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - after_room1_token = self.event_sources.get_current_token() - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room1_token, - to_token=after_room1_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room should show up because it was newly_left and joined during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1}) -+ self.assertIncludes(room_id_results, {room_id1}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - displayname_change_during_token_range_response2["event_id"], - "Corresponding map to disambiguate the opaque event IDs: " - + str( -@@ -2166,7 +2734,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - } - ), - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we first joined during the token range - self.assertTrue(room_id1 in newly_joined) - self.assertTrue(room_id1 not in newly_left) -@@ -2192,7 +2763,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - # Invited and left the room before the token - self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) -- leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) -+ _leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - # Invited to room2 - invite_room2_response = self.helper.invite( - room_id2, src=user2_id, targ=user1_id, tok=user2_tok -@@ -2215,45 +2786,52 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Leave room3 - self.helper.leave(room_id3, user1_id, tok=user1_tok) - -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_room3_token, - to_token=after_room3_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - -- 
self.assertEqual( -- room_id_results.keys(), -+ self.assertIncludes( -+ room_id_results, - { -- # Left before the from/to range -- room_id1, -+ # Excluded because we left before the from/to range -+ # room_id1, - # Invited before the from/to range - room_id2, - # `newly_left` during the from/to range - room_id3, - }, -+ exact=True, - ) - -- # Room1 -- # It should be pointing to the latest membership event in the from/to range -- self.assertEqual( -- room_id_results[room_id1].event_id, -- leave_room1_response["event_id"], -- ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -- # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left -- # before the token range -- self.assertTrue(room_id1 not in newly_joined) -- self.assertTrue(room_id1 not in newly_left) -- - # Room2 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id2].event_id, -+ interested_rooms.room_membership_for_user_map[room_id2].event_id, - invite_room2_response["event_id"], - ) -- self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id2].membership, -+ Membership.INVITE, -+ ) - # We should *NOT* be `newly_joined`/`newly_left` because we were invited before - # the token range - self.assertTrue(room_id2 not in newly_joined) -@@ -2262,10 +2840,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - # Room3 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id3].event_id, -+ interested_rooms.room_membership_for_user_map[room_id3].event_id, - leave_room3_response["event_id"], - ) -- self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id3].membership, -+ Membership.LEAVE, -+ ) - # We should be `newly_left` because we were invited and left during - # the token range - self.assertTrue(room_id3 not in newly_joined) -@@ -2282,7 +2863,16 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - user2_tok = self.login(user2_id, "pass") - - # The room where the state reset will happen -- room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) -+ room_id1 = self.helper.create_room_as( -+ user2_id, -+ is_public=True, -+ tok=user2_tok, -+ ) -+ # Create a dummy event for us to point back to for the state reset -+ dummy_event_response = self.helper.send(room_id1, "test", tok=user2_tok) -+ dummy_event_id = dummy_event_response["event_id"] -+ -+ # Join after the dummy event - join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Join another room so we don't hit the short-circuit and return early if they -@@ -2292,92 +2882,97 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): - - before_reset_token = self.event_sources.get_current_token() - -- # Send another state event to make a position for the state reset to happen at -- dummy_state_response = self.helper.send_state( -- room_id1, -- event_type="foobarbaz", -- state_key="", -- body={"foo": "bar"}, -- tok=user2_tok, -- ) -- dummy_state_pos = self.get_success( -- self.store.get_position_for_event(dummy_state_response["event_id"]) -- ) -- -- # Mock a state reset removing the membership for user1 in the current state -- self.get_success( -- self.store.db_pool.simple_delete( -- table="current_state_events", -- keyvalues={ -- "room_id": 
room_id1, -- "type": EventTypes.Member, -- "state_key": user1_id, -- }, -- desc="state reset user in current_state_events", -+ # Trigger a state reset -+ join_rule_event, join_rule_context = self.get_success( -+ create_event( -+ self.hs, -+ prev_event_ids=[dummy_event_id], -+ type=EventTypes.JoinRules, -+ state_key="", -+ content={"join_rule": JoinRules.INVITE}, -+ sender=user2_id, -+ room_id=room_id1, -+ room_version=self.get_success(self.store.get_room_version_id(room_id1)), - ) - ) -- self.get_success( -- self.store.db_pool.simple_delete( -- table="local_current_membership", -- keyvalues={ -- "room_id": room_id1, -- "user_id": user1_id, -- }, -- desc="state reset user in local_current_membership", -- ) -- ) -- self.get_success( -- self.store.db_pool.simple_insert( -- table="current_state_delta_stream", -- values={ -- "stream_id": dummy_state_pos.stream, -- "room_id": room_id1, -- "type": EventTypes.Member, -- "state_key": user1_id, -- "event_id": None, -- "prev_event_id": join_response1["event_id"], -- "instance_name": dummy_state_pos.instance_name, -- }, -- desc="state reset user in current_state_delta_stream", -- ) -+ _, join_rule_event_pos, _ = self.get_success( -+ self.persistence.persist_event(join_rule_event, join_rule_context) - ) - -- # Manually bust the cache since we we're just manually messing with the database -- # and not causing an actual state reset. -- self.store._membership_stream_cache.entity_has_changed( -- user1_id, dummy_state_pos.stream -- ) -+ # Ensure that the state reset worked and only user2 is in the room now -+ users_in_room = self.get_success(self.store.get_users_in_room(room_id1)) -+ self.assertIncludes(set(users_in_room), {user2_id}, exact=True) - - after_reset_token = self.event_sources.get_current_token() - - # The function under test -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_reset_token, - to_token=after_reset_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - - # Room1 should show up because it was `newly_left` via state reset during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) -+ self.assertIncludes(room_id_results, {room_id1, room_id2}, exact=True) - # It should be pointing to no event because we were removed from the room - # without a corresponding leave event - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - None, -+ "Corresponding map to disambiguate the opaque event IDs: " -+ + str( -+ { -+ "join_response1": join_response1["event_id"], -+ } -+ ), - ) - # State reset caused us to leave the room and there is no corresponding leave event -- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.LEAVE, -+ ) - # We should 
*NOT* be `newly_joined` because we joined before the token range - self.assertTrue(room_id1 not in newly_joined) - # We should be `newly_left` because we were removed via state reset during the from/to range - self.assertTrue(room_id1 in newly_left) - - --class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase): -+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the -+# foreground update for -+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by -+# https://github.com/element-hq/synapse/issues/17623) -+@parameterized_class( -+ ("use_new_tables",), -+ [ -+ (True,), -+ (False,), -+ ], -+ class_name_func=lambda cls, -+ num, -+ params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", -+) -+class ComputeInterestedRoomsShardTestCase( -+ BaseMultiWorkerStreamTestCase, SlidingSyncBase -+): - """ -- Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it works with -+ Tests Sliding Sync handler `compute_interested_rooms()` to make sure it works with - sharded event stream_writers enabled - """ - -@@ -2475,7 +3070,7 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa - join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) - join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) - # Leave room2 -- leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok) -+ _leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok) - join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok) - # Leave room3 - self.helper.leave(room_id3, user1_id, tok=user1_tok) -@@ -2565,57 +3160,74 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa - self.get_success(actx.__aexit__(None, None, None)) - - # The function under test -- room_id_results, newly_joined, newly_left = self.get_success( -- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( -- UserID.from_string(user1_id), -+ interested_rooms = self.get_success( -+ self.sliding_sync_handler.room_lists.compute_interested_rooms( -+ SlidingSyncConfig( -+ user=UserID.from_string(user1_id), -+ requester=create_requester(user_id=user1_id), -+ lists={ -+ "foo-list": SlidingSyncConfig.SlidingSyncList( -+ ranges=[(0, 99)], -+ required_state=[], -+ timeline_limit=1, -+ ) -+ }, -+ conn_id=None, -+ ), -+ PerConnectionState(), - from_token=before_stuck_activity_token, - to_token=stuck_activity_token, - ) - ) -+ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) -+ newly_joined = interested_rooms.newly_joined_rooms -+ newly_left = interested_rooms.newly_left_rooms - -- self.assertEqual( -- room_id_results.keys(), -+ self.assertIncludes( -+ room_id_results, - { - room_id1, -- room_id2, -+ # Excluded because we left before the from/to range and the second join -+ # event happened while worker2 was stuck and technically occurs after -+ # the `stuck_activity_token`. 
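The `@parameterized_class` decorator added above runs `ComputeInterestedRoomsShardTestCase` once against the new sliding-sync tables and once against the fallback path, naming the generated classes `..._new` and `..._fallback`. A minimal usage sketch of the same pattern (assuming the third-party `parameterized` package is available, as the patch itself uses):

```python
import unittest

from parameterized import parameterized_class


@parameterized_class(
    ("use_new_tables",),
    [
        (True,),
        (False,),
    ],
    class_name_func=lambda cls, num, params_dict: (
        f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}"
    ),
)
class ExampleShardTestCase(unittest.TestCase):
    # Set on each generated class by the decorator.
    use_new_tables: bool

    def test_table_mode_is_set(self) -> None:
        # Runs twice: once as ExampleShardTestCase_new, once as ..._fallback.
        self.assertIn(self.use_new_tables, (True, False))


if __name__ == "__main__":
    unittest.main()
```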
-+ # room_id2, - room_id3, - }, -+ exact=True, -+ message="Corresponding map to disambiguate the opaque room IDs: " -+ + str( -+ { -+ "room_id1": room_id1, -+ "room_id2": room_id2, -+ "room_id3": room_id3, -+ } -+ ), - ) - - # Room1 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id1].event_id, -+ interested_rooms.room_membership_for_user_map[room_id1].event_id, - join_room1_response["event_id"], - ) -- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id1].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id1 in newly_joined) - self.assertTrue(room_id1 not in newly_left) - -- # Room2 -- # It should be pointing to the latest membership event in the from/to range -- self.assertEqual( -- room_id_results[room_id2].event_id, -- leave_room2_response["event_id"], -- ) -- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) -- # room_id2 should *NOT* be considered `newly_left` because we left before the -- # from/to range and the join event during the range happened while worker2 was -- # stuck. This means that from the perspective of the master, where the -- # `stuck_activity_token` is generated, the stream position for worker2 wasn't -- # advanced to the join yet. Looking at the `instance_map`, the join technically -- # comes after `stuck_activity_token`. -- self.assertTrue(room_id2 not in newly_joined) -- self.assertTrue(room_id2 not in newly_left) -- - # Room3 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( -- room_id_results[room_id3].event_id, -+ interested_rooms.room_membership_for_user_map[room_id3].event_id, - join_on_worker3_response["event_id"], - ) -- self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN) -+ self.assertEqual( -+ interested_rooms.room_membership_for_user_map[room_id3].membership, -+ Membership.JOIN, -+ ) - # We should be `newly_joined` because we joined during the token range - self.assertTrue(room_id3 in newly_joined) - self.assertTrue(room_id3 not in newly_left) -@@ -2645,6 +3257,9 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - self.store = self.hs.get_datastores().main - self.event_sources = hs.get_event_sources() - self.storage_controllers = hs.get_storage_controllers() -+ persistence = self.hs.get_storage_controllers().persistence -+ assert persistence is not None -+ self.persistence = persistence - - def _get_sync_room_ids_for_user( - self, -@@ -2687,7 +3302,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - to_token=now_token, - ) - -- self.assertEqual(room_id_results.keys(), set()) -+ self.assertIncludes(room_id_results.keys(), set(), exact=True) - - def test_basic_rooms(self) -> None: - """ -@@ -2753,7 +3368,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - ) - - # Ensure that the invited, ban, and knock rooms show up -- self.assertEqual( -+ self.assertIncludes( - room_id_results.keys(), - { - join_room_id, -@@ -2761,6 +3376,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - ban_room_id, - knock_room_id, - }, -+ exact=True, - ) - # It should be pointing to the the respective membership event (latest - # membership event in the from/to range) -@@ -2824,7 +3440,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - ) - - # Only the 
`newly_left` room should show up -- self.assertEqual(room_id_results.keys(), {room_id2}) -+ self.assertIncludes(room_id_results.keys(), {room_id2}, exact=True) - self.assertEqual( - room_id_results[room_id2].event_id, - _leave_response2["event_id"], -@@ -2869,7 +3485,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - ) - - # The kicked room should show up -- self.assertEqual(room_id_results.keys(), {kick_room_id}) -+ self.assertIncludes(room_id_results.keys(), {kick_room_id}, exact=True) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( - room_id_results[kick_room_id].event_id, -@@ -2893,8 +3509,17 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - user2_tok = self.login(user2_id, "pass") - - # The room where the state reset will happen -- room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) -- join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) -+ room_id1 = self.helper.create_room_as( -+ user2_id, -+ is_public=True, -+ tok=user2_tok, -+ ) -+ # Create a dummy event for us to point back to for the state reset -+ dummy_event_response = self.helper.send(room_id1, "test", tok=user2_tok) -+ dummy_event_id = dummy_event_response["event_id"] -+ -+ # Join after the dummy event -+ self.helper.join(room_id1, user1_id, tok=user1_tok) - - # Join another room so we don't hit the short-circuit and return early if they - # have no room membership -@@ -2903,61 +3528,26 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - - before_reset_token = self.event_sources.get_current_token() - -- # Send another state event to make a position for the state reset to happen at -- dummy_state_response = self.helper.send_state( -- room_id1, -- event_type="foobarbaz", -- state_key="", -- body={"foo": "bar"}, -- tok=user2_tok, -- ) -- dummy_state_pos = self.get_success( -- self.store.get_position_for_event(dummy_state_response["event_id"]) -- ) -- -- # Mock a state reset removing the membership for user1 in the current state -- self.get_success( -- self.store.db_pool.simple_delete( -- table="current_state_events", -- keyvalues={ -- "room_id": room_id1, -- "type": EventTypes.Member, -- "state_key": user1_id, -- }, -- desc="state reset user in current_state_events", -- ) -- ) -- self.get_success( -- self.store.db_pool.simple_delete( -- table="local_current_membership", -- keyvalues={ -- "room_id": room_id1, -- "user_id": user1_id, -- }, -- desc="state reset user in local_current_membership", -+ # Trigger a state reset -+ join_rule_event, join_rule_context = self.get_success( -+ create_event( -+ self.hs, -+ prev_event_ids=[dummy_event_id], -+ type=EventTypes.JoinRules, -+ state_key="", -+ content={"join_rule": JoinRules.INVITE}, -+ sender=user2_id, -+ room_id=room_id1, -+ room_version=self.get_success(self.store.get_room_version_id(room_id1)), - ) - ) -- self.get_success( -- self.store.db_pool.simple_insert( -- table="current_state_delta_stream", -- values={ -- "stream_id": dummy_state_pos.stream, -- "room_id": room_id1, -- "type": EventTypes.Member, -- "state_key": user1_id, -- "event_id": None, -- "prev_event_id": join_response1["event_id"], -- "instance_name": dummy_state_pos.instance_name, -- }, -- desc="state reset user in current_state_delta_stream", -- ) -+ _, join_rule_event_pos, _ = self.get_success( -+ self.persistence.persist_event(join_rule_event, join_rule_context) - ) - -- # Manually bust the cache since we we're just manually messing with the database -- # and not causing an actual state 
reset. -- self.store._membership_stream_cache.entity_has_changed( -- user1_id, dummy_state_pos.stream -- ) -+ # Ensure that the state reset worked and only user2 is in the room now -+ users_in_room = self.get_success(self.store.get_users_in_room(room_id1)) -+ self.assertIncludes(set(users_in_room), {user2_id}, exact=True) - - after_reset_token = self.event_sources.get_current_token() - -@@ -2969,7 +3559,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): - ) - - # Room1 should show up because it was `newly_left` via state reset during the from/to range -- self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) -+ self.assertIncludes(room_id_results.keys(), {room_id1, room_id2}, exact=True) - # It should be pointing to no event because we were removed from the room - # without a corresponding leave event - self.assertEqual( --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch b/packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch deleted file mode 100644
index ffb0912..0000000
--- a/packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch
+++ /dev/null
@@ -1,537 +0,0 @@ -From 7c633f1a58e22ea27a172efdc52d94bfdac8c728 Mon Sep 17 00:00:00 2001 -From: Devon Hudson <devon.dmytro@gmail.com> -Date: Thu, 8 May 2025 14:28:23 +0000 -Subject: [PATCH 36/74] Pass leave from remote invite rejection down Sliding - Sync (#18375) - -Fixes #17753 - - -### Dev notes - -The `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` -database tables were added in -https://github.com/element-hq/synapse/pull/17512 - -### Pull Request Checklist - -<!-- Please read -https://element-hq.github.io/synapse/latest/development/contributing_guide.html -before submitting your pull request --> - -* [X] Pull request is based on the develop branch -* [x] Pull request includes a [changelog -file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). -The entry should: -- Be a short description of your change which makes sense to users. -"Fixed a bug that prevented receiving messages from other servers." -instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - - Use markdown where necessary, mostly for `code blocks`. - - End with either a period (.) or an exclamation mark (!). - - Start with a capital letter. -- Feel free to credit yourself, by adding a sentence "Contributed by -@github_username." or "Contributed by [Your Name]." to the end of the -entry. -* [X] [Code -style](https://element-hq.github.io/synapse/latest/code_style.html) is -correct -(run the -[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) - ---------- - -Co-authored-by: Erik Johnston <erik@matrix.org> -Co-authored-by: Olivier 'reivilibre <oliverw@matrix.org> -Co-authored-by: Eric Eastwood <erice@element.io> ---- - changelog.d/18375.bugfix | 1 + - synapse/handlers/sliding_sync/__init__.py | 23 ++ - synapse/handlers/sliding_sync/room_lists.py | 49 ++++- - synapse/storage/databases/main/stream.py | 202 ++++++++++++++++++ - .../92/03_ss_membership_snapshot_idx.sql | 16 ++ - tests/handlers/test_sliding_sync.py | 12 ++ - .../client/sliding_sync/test_sliding_sync.py | 58 +++++ - 7 files changed, 360 insertions(+), 1 deletion(-) - create mode 100644 changelog.d/18375.bugfix - create mode 100644 synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql - -diff --git a/changelog.d/18375.bugfix b/changelog.d/18375.bugfix -new file mode 100644 -index 0000000000..faebe6f046 ---- /dev/null -+++ b/changelog.d/18375.bugfix -@@ -0,0 +1 @@ -+Pass leave from remote invite rejection down Sliding Sync. -diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py -index 459d3c3e24..cb56eb53fc 100644 ---- a/synapse/handlers/sliding_sync/__init__.py -+++ b/synapse/handlers/sliding_sync/__init__.py -@@ -271,6 +271,7 @@ class SlidingSyncHandler: - from_token=from_token, - to_token=to_token, - newly_joined=room_id in interested_rooms.newly_joined_rooms, -+ newly_left=room_id in interested_rooms.newly_left_rooms, - is_dm=room_id in interested_rooms.dm_room_ids, - ) - -@@ -542,6 +543,7 @@ class SlidingSyncHandler: - from_token: Optional[SlidingSyncStreamToken], - to_token: StreamToken, - newly_joined: bool, -+ newly_left: bool, - is_dm: bool, - ) -> SlidingSyncResult.RoomResult: - """ -@@ -559,6 +561,7 @@ class SlidingSyncHandler: - from_token: The point in the stream to sync from. - to_token: The point in the stream to sync up to. 
- newly_joined: If the user has newly joined the room -+ newly_left: If the user has newly left the room - is_dm: Whether the room is a DM room - """ - user = sync_config.user -@@ -856,6 +859,26 @@ class SlidingSyncHandler: - # TODO: Limit the number of state events we're about to send down - # the room, if its too many we should change this to an - # `initial=True`? -+ -+ # For the case of rejecting remote invites, the leave event won't be -+ # returned by `get_current_state_deltas_for_room`. This is due to the current -+ # state only being filled out for rooms the server is in, and so doesn't pick -+ # up out-of-band leaves (including locally rejected invites) as these events -+ # are outliers and not added to the `current_state_delta_stream`. -+ # -+ # We rely on being explicitly told that the room has been `newly_left` to -+ # ensure we extract the out-of-band leave. -+ if newly_left and room_membership_for_user_at_to_token.event_id is not None: -+ membership_changed = True -+ leave_event = await self.store.get_event( -+ room_membership_for_user_at_to_token.event_id -+ ) -+ state_key = leave_event.get_state_key() -+ if state_key is not None: -+ room_state_delta_id_map[(leave_event.type, state_key)] = ( -+ room_membership_for_user_at_to_token.event_id -+ ) -+ - deltas = await self.get_current_state_deltas_for_room( - room_id=room_id, - room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, -diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py -index 7e3cf539df..6d1ac91605 100644 ---- a/synapse/handlers/sliding_sync/room_lists.py -+++ b/synapse/handlers/sliding_sync/room_lists.py -@@ -1120,7 +1120,7 @@ class SlidingSyncRoomLists: - ( - newly_joined_room_ids, - newly_left_room_map, -- ) = await self._get_newly_joined_and_left_rooms( -+ ) = await self._get_newly_joined_and_left_rooms_fallback( - user_id, to_token=to_token, from_token=from_token - ) - -@@ -1176,6 +1176,53 @@ class SlidingSyncRoomLists: - "state reset" out of the room, and so that room would not be part of the - "current memberships" of the user. - -+ Returns: -+ A 2-tuple of newly joined room IDs and a map of newly_left room -+ IDs to the `RoomsForUserStateReset` entry. -+ -+ We're using `RoomsForUserStateReset` but that doesn't necessarily mean the -+ user was state reset of the rooms. It's just that the `event_id`/`sender` -+ are optional and we can't tell the difference between the server leaving the -+ room when the user was the last person participating in the room and left or -+ was state reset out of the room. To actually check for a state reset, you -+ need to check if a membership still exists in the room. 
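The handler change above uses the `newly_left` flag to pull an out-of-band leave (for example, a locally rejected remote invite) into the state-delta map, since such leaves are outliers and never reach `current_state_delta_stream`. A stripped-down sketch of just that injection step, with hypothetical stand-ins for Synapse's event and map types:

```python
from dataclasses import dataclass
from typing import Dict, Optional, Tuple


# Hypothetical stand-in for the slice of an event the injection step needs.
@dataclass
class LeaveEvent:
    event_id: str
    type: str                # e.g. "m.room.member"
    state_key: Optional[str]


def inject_out_of_band_leave(
    room_state_delta_id_map: Dict[Tuple[str, str], str],
    newly_left: bool,
    membership_event: Optional[LeaveEvent],
) -> bool:
    """Mirror the patched handler logic: if the room is newly_left and we know
    the membership event, record it as a state delta so the leave reaches the
    client even though it never hit `current_state_delta_stream`."""
    if not newly_left or membership_event is None:
        return False
    if membership_event.state_key is None:
        return False
    room_state_delta_id_map[(membership_event.type, membership_event.state_key)] = (
        membership_event.event_id
    )
    return True  # i.e. membership_changed


# Example: a locally rejected remote invite shows up as an out-of-band leave.
deltas: Dict[Tuple[str, str], str] = {}
leave = LeaveEvent(
    event_id="$leave:test", type="m.room.member", state_key="@user1:test"
)
assert inject_out_of_band_leave(deltas, newly_left=True, membership_event=leave)
assert deltas[("m.room.member", "@user1:test")] == "$leave:test"
```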
-+ """ -+ -+ newly_joined_room_ids: Set[str] = set() -+ newly_left_room_map: Dict[str, RoomsForUserStateReset] = {} -+ -+ if not from_token: -+ return newly_joined_room_ids, newly_left_room_map -+ -+ changes = await self.store.get_sliding_sync_membership_changes( -+ user_id, -+ from_key=from_token.room_key, -+ to_key=to_token.room_key, -+ excluded_room_ids=set(self.rooms_to_exclude_globally), -+ ) -+ -+ for room_id, entry in changes.items(): -+ if entry.membership == Membership.JOIN: -+ newly_joined_room_ids.add(room_id) -+ elif entry.membership == Membership.LEAVE: -+ newly_left_room_map[room_id] = entry -+ -+ return newly_joined_room_ids, newly_left_room_map -+ -+ @trace -+ async def _get_newly_joined_and_left_rooms_fallback( -+ self, -+ user_id: str, -+ to_token: StreamToken, -+ from_token: Optional[StreamToken], -+ ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: -+ """Fetch the sets of rooms that the user newly joined or left in the -+ given token range. -+ -+ Note: there may be rooms in the newly left rooms where the user was -+ "state reset" out of the room, and so that room would not be part of the -+ "current memberships" of the user. -+ - Returns: - A 2-tuple of newly joined room IDs and a map of newly_left room - IDs to the `RoomsForUserStateReset` entry. -diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py -index c52389b8a9..3fda49f31f 100644 ---- a/synapse/storage/databases/main/stream.py -+++ b/synapse/storage/databases/main/stream.py -@@ -80,6 +80,7 @@ from synapse.storage.database import ( - ) - from synapse.storage.databases.main.events_worker import EventsWorkerStore - from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine -+from synapse.storage.roommember import RoomsForUserStateReset - from synapse.storage.util.id_generators import MultiWriterIdGenerator - from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection - from synapse.util.caches.descriptors import cached, cachedList -@@ -993,6 +994,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): - available in the `current_state_delta_stream` table. To actually check for a - state reset, you need to check if a membership still exists in the room. - """ -+ -+ assert from_key.topological is None -+ assert to_key.topological is None -+ - # Start by ruling out cases where a DB query is not necessary. - if from_key == to_key: - return [] -@@ -1138,6 +1143,203 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): - if membership_change.room_id not in room_ids_to_exclude - ] - -+ @trace -+ async def get_sliding_sync_membership_changes( -+ self, -+ user_id: str, -+ from_key: RoomStreamToken, -+ to_key: RoomStreamToken, -+ excluded_room_ids: Optional[AbstractSet[str]] = None, -+ ) -> Dict[str, RoomsForUserStateReset]: -+ """ -+ Fetch membership events that result in a meaningful membership change for a -+ given user. -+ -+ A meaningful membership changes is one where the `membership` value actually -+ changes. This means memberships changes from `join` to `join` (like a display -+ name change) will be filtered out since they result in no meaningful change. -+ -+ Note: This function only works with "live" tokens with `stream_ordering` only. -+ -+ We're looking for membership changes in the token range (> `from_key` and <= -+ `to_key`). -+ -+ Args: -+ user_id: The user ID to fetch membership events for. -+ from_key: The point in the stream to sync from (fetching events > this point). 
-+ to_key: The token to fetch rooms up to (fetching events <= this point). -+ excluded_room_ids: Optional list of room IDs to exclude from the results. -+ -+ Returns: -+ All meaningful membership changes to the current state in the token range. -+ Events are sorted by `stream_ordering` ascending. -+ -+ `event_id`/`sender` can be `None` when the server leaves a room (meaning -+ everyone locally left) or a state reset which removed the person from the -+ room. We can't tell the difference between the two cases with what's -+ available in the `current_state_delta_stream` table. To actually check for a -+ state reset, you need to check if a membership still exists in the room. -+ """ -+ -+ assert from_key.topological is None -+ assert to_key.topological is None -+ -+ # Start by ruling out cases where a DB query is not necessary. -+ if from_key == to_key: -+ return {} -+ -+ if from_key: -+ has_changed = self._membership_stream_cache.has_entity_changed( -+ user_id, int(from_key.stream) -+ ) -+ if not has_changed: -+ return {} -+ -+ room_ids_to_exclude: AbstractSet[str] = set() -+ if excluded_room_ids is not None: -+ room_ids_to_exclude = excluded_room_ids -+ -+ def f(txn: LoggingTransaction) -> Dict[str, RoomsForUserStateReset]: -+ # To handle tokens with a non-empty instance_map we fetch more -+ # results than necessary and then filter down -+ min_from_id = from_key.stream -+ max_to_id = to_key.get_max_stream_pos() -+ -+ # This query looks at membership changes in -+ # `sliding_sync_membership_snapshots` which will not include users -+ # that were state reset out of rooms; so we need to look for that -+ # case in `current_state_delta_stream`. -+ sql = """ -+ SELECT -+ room_id, -+ membership_event_id, -+ event_instance_name, -+ event_stream_ordering, -+ membership, -+ sender, -+ prev_membership, -+ room_version -+ FROM -+ ( -+ SELECT -+ s.room_id, -+ s.membership_event_id, -+ s.event_instance_name, -+ s.event_stream_ordering, -+ s.membership, -+ s.sender, -+ m_prev.membership AS prev_membership -+ FROM sliding_sync_membership_snapshots as s -+ LEFT JOIN event_edges AS e ON e.event_id = s.membership_event_id -+ LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = e.prev_event_id -+ WHERE s.user_id = ? -+ -+ UNION ALL -+ -+ SELECT -+ s.room_id, -+ e.event_id, -+ s.instance_name, -+ s.stream_id, -+ m.membership, -+ e.sender, -+ m_prev.membership AS prev_membership -+ FROM current_state_delta_stream AS s -+ LEFT JOIN events AS e ON e.event_id = s.event_id -+ LEFT JOIN room_memberships AS m ON m.event_id = s.event_id -+ LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = s.prev_event_id -+ WHERE -+ s.type = ? -+ AND s.state_key = ? -+ ) AS c -+ INNER JOIN rooms USING (room_id) -+ WHERE event_stream_ordering > ? AND event_stream_ordering <= ? 
-+ ORDER BY event_stream_ordering ASC -+ """ -+ -+ txn.execute( -+ sql, -+ (user_id, EventTypes.Member, user_id, min_from_id, max_to_id), -+ ) -+ -+ membership_changes: Dict[str, RoomsForUserStateReset] = {} -+ for ( -+ room_id, -+ membership_event_id, -+ event_instance_name, -+ event_stream_ordering, -+ membership, -+ sender, -+ prev_membership, -+ room_version_id, -+ ) in txn: -+ assert room_id is not None -+ assert event_stream_ordering is not None -+ -+ if room_id in room_ids_to_exclude: -+ continue -+ -+ if _filter_results_by_stream( -+ from_key, -+ to_key, -+ event_instance_name, -+ event_stream_ordering, -+ ): -+ # When the server leaves a room, it will insert new rows into the -+ # `current_state_delta_stream` table with `event_id = null` for all -+ # current state. This means we might already have a row for the -+ # leave event and then another for the same leave where the -+ # `event_id=null` but the `prev_event_id` is pointing back at the -+ # earlier leave event. We don't want to report the leave, if we -+ # already have a leave event. -+ if ( -+ membership_event_id is None -+ and prev_membership == Membership.LEAVE -+ ): -+ continue -+ -+ if membership_event_id is None and room_id in membership_changes: -+ # SUSPICIOUS: if we join a room and get state reset out of it -+ # in the same queried window, -+ # won't this ignore the 'state reset out of it' part? -+ continue -+ -+ # When `s.event_id = null`, we won't be able to get respective -+ # `room_membership` but can assume the user has left the room -+ # because this only happens when the server leaves a room -+ # (meaning everyone locally left) or a state reset which removed -+ # the person from the room. -+ membership = ( -+ membership if membership is not None else Membership.LEAVE -+ ) -+ -+ if membership == prev_membership: -+ # If `membership` and `prev_membership` are the same then this -+ # is not a meaningful change so we can skip it. -+ # An example of this happening is when the user changes their display name. -+ continue -+ -+ membership_change = RoomsForUserStateReset( -+ room_id=room_id, -+ sender=sender, -+ membership=membership, -+ event_id=membership_event_id, -+ event_pos=PersistedEventPosition( -+ event_instance_name, event_stream_ordering -+ ), -+ room_version_id=room_version_id, -+ ) -+ -+ membership_changes[room_id] = membership_change -+ -+ return membership_changes -+ -+ membership_changes = await self.db_pool.runInteraction( -+ "get_sliding_sync_membership_changes", f -+ ) -+ -+ return membership_changes -+ - @cancellable - async def get_membership_changes_for_user( - self, -diff --git a/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql b/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql -new file mode 100644 -index 0000000000..c694203f95 ---- /dev/null -+++ b/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql -@@ -0,0 +1,16 @@ -+-- -+-- This file is licensed under the Affero General Public License (AGPL) version 3. -+-- -+-- Copyright (C) 2025 New Vector, Ltd -+-- -+-- This program is free software: you can redistribute it and/or modify -+-- it under the terms of the GNU Affero General Public License as -+-- published by the Free Software Foundation, either version 3 of the -+-- License, or (at your option) any later version. -+-- -+-- See the GNU Affero General Public License for more details: -+-- <https://www.gnu.org/licenses/agpl-3.0.html>. 
-+ -+-- So we can fetch all rooms for a given user sorted by stream order -+DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id; -+CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id, event_stream_ordering); -diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py -index cbacf21ae7..7144c58217 100644 ---- a/tests/handlers/test_sliding_sync.py -+++ b/tests/handlers/test_sliding_sync.py -@@ -594,6 +594,12 @@ class ComputeInterestedRoomsTestCase(SlidingSyncBase): - the correct list of rooms IDs. - """ - -+ # FIXME: We should refactor these tests to run against `compute_interested_rooms(...)` -+ # instead of just `get_room_membership_for_user_at_to_token(...)` which is only used -+ # in the fallback path (`_compute_interested_rooms_fallback(...)`). These scenarios do -+ # well to stress that logic and we shouldn't remove them just because we're removing -+ # the fallback path (tracked by https://github.com/element-hq/synapse/issues/17623). -+ - servlets = [ - admin.register_servlets, - knock.register_servlets, -@@ -2976,6 +2982,12 @@ class ComputeInterestedRoomsShardTestCase( - sharded event stream_writers enabled - """ - -+ # FIXME: We should refactor these tests to run against `compute_interested_rooms(...)` -+ # instead of just `get_room_membership_for_user_at_to_token(...)` which is only used -+ # in the fallback path (`_compute_interested_rooms_fallback(...)`). These scenarios do -+ # well to stress that logic and we shouldn't remove them just because we're removing -+ # the fallback path (tracked by https://github.com/element-hq/synapse/issues/17623). -+ - servlets = [ - admin.register_servlets_for_client_rest_resource, - room.register_servlets, -diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py -index f3cf2111ec..dcec5b4cf0 100644 ---- a/tests/rest/client/sliding_sync/test_sliding_sync.py -+++ b/tests/rest/client/sliding_sync/test_sliding_sync.py -@@ -790,6 +790,64 @@ class SlidingSyncTestCase(SlidingSyncBase): - exact=True, - ) - -+ def test_reject_remote_invite(self) -> None: -+ """Test that rejecting a remote invite comes down incremental sync""" -+ -+ user_id = self.register_user("user1", "pass") -+ user_tok = self.login(user_id, "pass") -+ -+ # Create a remote room invite (out-of-band membership) -+ room_id = "!room:remote.server" -+ self._create_remote_invite_room_for_user(user_id, None, room_id) -+ -+ # Make the Sliding Sync request -+ sync_body = { -+ "lists": { -+ "foo-list": { -+ "ranges": [[0, 1]], -+ "required_state": [(EventTypes.Member, StateValues.ME)], -+ "timeline_limit": 3, -+ } -+ } -+ } -+ response_body, from_token = self.do_sync(sync_body, tok=user_tok) -+ # We should see the room (like normal) -+ self.assertIncludes( -+ set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), -+ {room_id}, -+ exact=True, -+ ) -+ -+ # Reject the remote room invite -+ self.helper.leave(room_id, user_id, tok=user_tok) -+ -+ # Sync again after rejecting the invite -+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user_tok) -+ -+ # The fix to add the leave event to incremental sync when rejecting a remote -+ # invite relies on the new tables to work. 
-+ if self.use_new_tables: -+ # We should see the newly_left room -+ self.assertIncludes( -+ set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), -+ {room_id}, -+ exact=True, -+ ) -+ # We should see the leave state for the room so clients don't end up with stuck -+ # invites -+ self.assertIncludes( -+ { -+ ( -+ state["type"], -+ state["state_key"], -+ state["content"].get("membership"), -+ ) -+ for state in response_body["rooms"][room_id]["required_state"] -+ }, -+ {(EventTypes.Member, user_id, Membership.LEAVE)}, -+ exact=True, -+ ) -+ - def test_ignored_user_invites_initial_sync(self) -> None: - """ - Make sure we ignore invites if they are from one of the `m.ignored_user_list` on --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch b/packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch deleted file mode 100644
index 38b7a9c..0000000 --- a/packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch +++ /dev/null
@@ -1,28 +0,0 @@ -From b5d94f654c32b0cd09ba727baddba93b0bf4f63f Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Fri, 9 May 2025 15:35:18 +0100 -Subject: [PATCH 37/74] Bump sha2 from 0.10.8 to 0.10.9 (#18395) - ---- - Cargo.lock | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/Cargo.lock b/Cargo.lock -index 822eb2cdba..27a2e26be5 100644 ---- a/Cargo.lock -+++ b/Cargo.lock -@@ -480,9 +480,9 @@ dependencies = [ - - [[package]] - name = "sha2" --version = "0.10.8" -+version = "0.10.9" - source = "registry+https://github.com/rust-lang/crates.io-index" --checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" -+checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" - dependencies = [ - "cfg-if", - "cpufeatures", --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch b/packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch deleted file mode 100644
index f6d13c0..0000000 --- a/packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch +++ /dev/null
@@ -1,35 +0,0 @@ -From c6dfe70014c7f577a7fa749bfb8953bd08bc69d7 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Fri, 9 May 2025 15:36:41 +0100 -Subject: [PATCH 38/74] Bump txredisapi from 1.4.10 to 1.4.11 (#18392) - ---- - poetry.lock | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index abd97a785b..69d76936b0 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -2886,15 +2886,15 @@ windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)" - - [[package]] - name = "txredisapi" --version = "1.4.10" -+version = "1.4.11" - description = "non-blocking redis client for python" - optional = true - python-versions = "*" - groups = ["main"] - markers = "extra == \"all\" or extra == \"redis\"" - files = [ -- {file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"}, -- {file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"}, -+ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, -+ {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, - ] - - [package.dependencies] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch b/packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch deleted file mode 100644
index be5695e..0000000 --- a/packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch +++ /dev/null
@@ -1,34 +0,0 @@ -From b7728a2df10de6cd09f5313ebca8a95e226c15fc Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Fri, 9 May 2025 15:37:05 +0100 -Subject: [PATCH 39/74] Bump packaging from 24.2 to 25.0 (#18393) - ---- - poetry.lock | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index 69d76936b0..8ff3a377f4 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -1561,14 +1561,14 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte - - [[package]] - name = "packaging" --version = "24.2" -+version = "25.0" - description = "Core utilities for Python packages" - optional = false - python-versions = ">=3.8" - groups = ["main", "dev"] - files = [ -- {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, -- {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, -+ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, -+ {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, - ] - - [[package]] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch b/packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch deleted file mode 100644
index 7693b29..0000000 --- a/packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch +++ /dev/null
@@ -1,279 +0,0 @@ -From 1920dfff40ad1078071e099a2afbfa31a5409e6b Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Fri, 9 May 2025 16:36:54 +0100 -Subject: [PATCH 40/74] Bump pydantic from 2.10.3 to 2.11.4 (#18394) - ---- - poetry.lock | 229 ++++++++++++++++++++++++++++------------------------ - 1 file changed, 122 insertions(+), 107 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index 8ff3a377f4..e06e08b7a7 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -1795,20 +1795,21 @@ files = [ - - [[package]] - name = "pydantic" --version = "2.10.3" -+version = "2.11.4" - description = "Data validation using Python type hints" - optional = false --python-versions = ">=3.8" -+python-versions = ">=3.9" - groups = ["main", "dev"] - files = [ -- {file = "pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d"}, -- {file = "pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9"}, -+ {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"}, -+ {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"}, - ] - - [package.dependencies] - annotated-types = ">=0.6.0" --pydantic-core = "2.27.1" -+pydantic-core = "2.33.2" - typing-extensions = ">=4.12.2" -+typing-inspection = ">=0.4.0" - - [package.extras] - email = ["email-validator (>=2.0.0)"] -@@ -1816,112 +1817,111 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows - - [[package]] - name = "pydantic-core" --version = "2.27.1" -+version = "2.33.2" - description = "Core functionality for Pydantic validation and serialization" - optional = false --python-versions = ">=3.8" -+python-versions = ">=3.9" - groups = ["main", "dev"] - files = [ -- {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = 
"sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, -- {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, -- {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, -- {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, -- {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, -- {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, -- {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, -- {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, -- {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, -- {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, -- {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, -- {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, -- {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, -- {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, -- {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = 
"sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, -- {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, -- {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, -- {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, -- {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, -- {file = 
"pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, -- {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, -- {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, -- {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, -- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = 
"sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, -- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, -- {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, -+ {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, -+ {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, -+ {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = 
"sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, -+ {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, -+ {file = 
"pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, -+ {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, -+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, -+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, -+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, -+ {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, - ] - - [package.dependencies] -@@ -3085,6 +3085,21 @@ files = [ - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, - ] - -+[[package]] -+name = "typing-inspection" -+version = "0.4.0" -+description = "Runtime typing introspection tools" -+optional = false -+python-versions = ">=3.9" -+groups = ["main", "dev"] -+files = [ -+ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, -+ {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -+] -+ -+[package.dependencies] -+typing-extensions = ">=4.12.0" -+ - [[package]] - name = "unpaddedbase64" - version = "2.1.0" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch b/packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch deleted file mode 100644
index 7d9416f..0000000 --- a/packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch +++ /dev/null
@@ -1,54 +0,0 @@ -From 3dade08e7cef99a83e3410365a14a21a2b24d545 Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 13 May 2025 09:34:23 +0100 -Subject: [PATCH 41/74] Bump actions/setup-go from 5.4.0 to 5.5.0 (#18426) - -Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> ---- - .github/workflows/latest_deps.yml | 2 +- - .github/workflows/tests.yml | 2 +- - .github/workflows/twisted_trunk.yml | 2 +- - 3 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml -index e7378ec0d3..366bb4cddb 100644 ---- a/.github/workflows/latest_deps.yml -+++ b/.github/workflows/latest_deps.yml -@@ -200,7 +200,7 @@ jobs: - - name: Prepare Complement's Prerequisites - run: synapse/.ci/scripts/setup_complement_prerequisites.sh - -- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 -+ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - cache-dependency-path: complement/go.sum - go-version-file: complement/go.mod -diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml -index bb2e80a908..a7e35a0ece 100644 ---- a/.github/workflows/tests.yml -+++ b/.github/workflows/tests.yml -@@ -669,7 +669,7 @@ jobs: - - name: Prepare Complement's Prerequisites - run: synapse/.ci/scripts/setup_complement_prerequisites.sh - -- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 -+ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - cache-dependency-path: complement/go.sum - go-version-file: complement/go.mod -diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml -index 0176f17401..5638029b39 100644 ---- a/.github/workflows/twisted_trunk.yml -+++ b/.github/workflows/twisted_trunk.yml -@@ -173,7 +173,7 @@ jobs: - - name: Prepare Complement's Prerequisites - run: synapse/.ci/scripts/setup_complement_prerequisites.sh - -- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 -+ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - cache-dependency-path: complement/go.sum - go-version-file: complement/go.mod --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch b/packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch deleted file mode 100644
index 69e27c6..0000000 --- a/packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch +++ /dev/null
@@ -1,191 +0,0 @@ -From 40ce11ded0aa32158aee4d6526b8dd40c1c63a6a Mon Sep 17 00:00:00 2001 -From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> -Date: Tue, 13 May 2025 09:46:03 +0100 -Subject: [PATCH 42/74] Bump pillow from 11.1.0 to 11.2.1 (#18429) - -Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> ---- - poetry.lock | 157 ++++++++++++++++++++++++++++------------------------ - 1 file changed, 84 insertions(+), 73 deletions(-) - -diff --git a/poetry.lock b/poetry.lock -index e06e08b7a7..1935df638a 100644 ---- a/poetry.lock -+++ b/poetry.lock -@@ -1600,89 +1600,100 @@ files = [ - - [[package]] - name = "pillow" --version = "11.1.0" -+version = "11.2.1" - description = "Python Imaging Library (Fork)" - optional = false - python-versions = ">=3.9" - groups = ["main"] - files = [ -- {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, -- {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, -- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"}, -- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"}, -- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"}, -- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"}, -- {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"}, -- {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"}, -- {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"}, -- {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"}, -- {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"}, -- {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"}, -- {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"}, -- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"}, -- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"}, -- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"}, -- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"}, -- {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"}, -- {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"}, -- {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"}, -- {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"}, -- {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"}, -- {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"}, -- {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"}, -- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"}, -- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"}, -- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"}, -- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"}, -- {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"}, -- {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"}, -- {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"}, -- {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"}, -- {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"}, -- {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"}, -- {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"}, -- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"}, -- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"}, -- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"}, -- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"}, -- {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"}, -- {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"}, -- {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = 
"sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"}, -- {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"}, -- {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"}, -- {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"}, -- {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"}, -- {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"}, -- {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"}, -- {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"}, -- {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"}, -- {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"}, -- {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"}, -- {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"}, -- {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"}, -- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"}, -- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"}, -- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"}, -- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"}, -- {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"}, -- {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"}, -- {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"}, -- {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"}, -- {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"}, -- {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"}, -- {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"}, -+ {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, -+ {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, -+ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61"}, -+ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1"}, -+ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c"}, -+ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d"}, -+ {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97"}, -+ {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579"}, -+ {file = "pillow-11.2.1-cp310-cp310-win32.whl", hash = "sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d"}, -+ {file = "pillow-11.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad"}, -+ {file = "pillow-11.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2"}, -+ {file = "pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70"}, -+ {file = "pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf"}, -+ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7"}, -+ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8"}, -+ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600"}, -+ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788"}, -+ {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e"}, -+ 
{file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e"}, -+ {file = "pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6"}, -+ {file = "pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193"}, -+ {file = "pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7"}, -+ {file = "pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f"}, -+ {file = "pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b"}, -+ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d"}, -+ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4"}, -+ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d"}, -+ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4"}, -+ {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443"}, -+ {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c"}, -+ {file = "pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3"}, -+ {file = "pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941"}, -+ {file = "pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb"}, -+ {file = "pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28"}, -+ {file = "pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830"}, -+ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0"}, -+ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1"}, -+ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f"}, -+ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155"}, -+ {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14"}, -+ {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b"}, -+ {file = "pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2"}, -+ 
{file = "pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691"}, -+ {file = "pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c"}, -+ {file = "pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22"}, -+ {file = "pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7"}, -+ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16"}, -+ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b"}, -+ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406"}, -+ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91"}, -+ {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751"}, -+ {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9"}, -+ {file = "pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd"}, -+ {file = "pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e"}, -+ {file = "pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681"}, -+ {file = "pillow-11.2.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8"}, -+ {file = "pillow-11.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909"}, -+ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928"}, -+ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79"}, -+ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35"}, -+ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb"}, -+ {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a"}, -+ {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36"}, -+ {file = "pillow-11.2.1-cp39-cp39-win32.whl", hash = "sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67"}, -+ {file = "pillow-11.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1"}, -+ {file = "pillow-11.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e"}, -+ {file = 
"pillow-11.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156"}, -+ {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772"}, -+ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363"}, -+ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0"}, -+ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01"}, -+ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193"}, -+ {file = "pillow-11.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f"}, -+ {file = "pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044"}, -+ {file = "pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6"}, - ] - - [package.extras] --docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] -+docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] - fpx = ["olefile"] - mic = ["olefile"] -+test-arrow = ["pyarrow"] - tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] - typing = ["typing-extensions ; python_version < \"3.10\""] - xmp = ["defusedxml"] --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch b/packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch deleted file mode 100644
index 6d1ad3f..0000000
--- a/packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch
+++ /dev/null
@@ -1,351 +0,0 @@ -From 09b4109c2e3740d754f9256540bd96444fb82453 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erik@matrix.org> -Date: Tue, 13 May 2025 10:44:11 +0100 -Subject: [PATCH 43/74] 1.130.0rc1 - ---- - CHANGES.md | 63 +++++++++++++++++++++++++++++++++++++++ - changelog.d/17578.misc | 1 - - changelog.d/18181.misc | 1 - - changelog.d/18214.feature | 1 - - changelog.d/18218.doc | 1 - - changelog.d/18237.doc | 1 - - changelog.d/18291.docker | 1 - - changelog.d/18292.docker | 1 - - changelog.d/18293.docker | 1 - - changelog.d/18295.docker | 1 - - changelog.d/18297.misc | 1 - - changelog.d/18300.feature | 1 - - changelog.d/18313.misc | 1 - - changelog.d/18320.doc | 1 - - changelog.d/18330.misc | 1 - - changelog.d/18355.feature | 1 - - changelog.d/18360.misc | 1 - - changelog.d/18363.bugfix | 1 - - changelog.d/18367.misc | 1 - - changelog.d/18369.misc | 1 - - changelog.d/18374.misc | 1 - - changelog.d/18375.bugfix | 1 - - changelog.d/18377.doc | 1 - - changelog.d/18384.doc | 1 - - changelog.d/18385.misc | 1 - - changelog.d/18390.misc | 1 - - changelog.d/18399.misc | 1 - - debian/changelog | 6 ++++ - pyproject.toml | 2 +- - 29 files changed, 70 insertions(+), 27 deletions(-) - delete mode 100644 changelog.d/17578.misc - delete mode 100644 changelog.d/18181.misc - delete mode 100644 changelog.d/18214.feature - delete mode 100644 changelog.d/18218.doc - delete mode 100644 changelog.d/18237.doc - delete mode 100644 changelog.d/18291.docker - delete mode 100644 changelog.d/18292.docker - delete mode 100644 changelog.d/18293.docker - delete mode 100644 changelog.d/18295.docker - delete mode 100644 changelog.d/18297.misc - delete mode 100644 changelog.d/18300.feature - delete mode 100644 changelog.d/18313.misc - delete mode 100644 changelog.d/18320.doc - delete mode 100644 changelog.d/18330.misc - delete mode 100644 changelog.d/18355.feature - delete mode 100644 changelog.d/18360.misc - delete mode 100644 changelog.d/18363.bugfix - delete mode 100644 changelog.d/18367.misc - delete mode 100644 changelog.d/18369.misc - delete mode 100644 changelog.d/18374.misc - delete mode 100644 changelog.d/18375.bugfix - delete mode 100644 changelog.d/18377.doc - delete mode 100644 changelog.d/18384.doc - delete mode 100644 changelog.d/18385.misc - delete mode 100644 changelog.d/18390.misc - delete mode 100644 changelog.d/18399.misc - -diff --git a/CHANGES.md b/CHANGES.md -index f04c7ef026..235d65c746 100644 ---- a/CHANGES.md -+++ b/CHANGES.md -@@ -1,3 +1,66 @@ -+# Synapse 1.130.0rc1 (2025-05-13) -+ -+### Features -+ -+- Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. ([\#18214](https://github.com/element-hq/synapse/issues/18214)) -+- Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results. ([\#18300](https://github.com/element-hq/synapse/issues/18300)) -+- Add support for handling `GET /devices/` on workers. ([\#18355](https://github.com/element-hq/synapse/issues/18355)) -+ -+### Bugfixes -+ -+- Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363)) -+- Pass leave from remote invite rejection down Sliding Sync. 
([\#18375](https://github.com/element-hq/synapse/issues/18375)) -+ -+### Updates to the Docker image -+ -+- In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291)) -+- Optimize the build of the workers image. ([\#18292](https://github.com/element-hq/synapse/issues/18292)) -+- In start_for_complement.sh, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293)) -+- When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. ([\#18295](https://github.com/element-hq/synapse/issues/18295)) -+ -+### Improved Documentation -+ -+- Improve formatting of the README file. ([\#18218](https://github.com/element-hq/synapse/issues/18218)) -+- Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider. ([\#18237](https://github.com/element-hq/synapse/issues/18237)) -+- Fix typo in docs about the `push` config option. Contributed by @HarHarLinks. ([\#18320](https://github.com/element-hq/synapse/issues/18320)) -+- Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers. ([\#18377](https://github.com/element-hq/synapse/issues/18377)) -+- Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. ([\#18384](https://github.com/element-hq/synapse/issues/18384)) -+ -+### Internal Changes -+ -+- Return specific error code when adding an email address / phone number to account is not supported (MSC4178). ([\#17578](https://github.com/element-hq/synapse/issues/17578)) -+- Stop auto-provisionning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. ([\#18181](https://github.com/element-hq/synapse/issues/18181)) -+- Apply file hashing and existing quarantines to media downloaded for URL previews. ([\#18297](https://github.com/element-hq/synapse/issues/18297)) -+- Allow a few admin APIs used by matrix-authentication-service to run on workers. ([\#18313](https://github.com/element-hq/synapse/issues/18313)) -+- Apply `should_drop_federated_event` to federation invites. ([\#18330](https://github.com/element-hq/synapse/issues/18330)) -+- Allow `/rooms/` admin API to be run on workers. ([\#18360](https://github.com/element-hq/synapse/issues/18360)) -+- Minor performance improvements to the notifier. ([\#18367](https://github.com/element-hq/synapse/issues/18367)) -+- Slight performance increase when using the ratelimiter. ([\#18369](https://github.com/element-hq/synapse/issues/18369)) -+- Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token. ([\#18374](https://github.com/element-hq/synapse/issues/18374), [\#18385](https://github.com/element-hq/synapse/issues/18385)) -+- Fixed test failures when using authlib 1.5.2. ([\#18390](https://github.com/element-hq/synapse/issues/18390)) -+- Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both new and fallback logic paths. ([\#18399](https://github.com/element-hq/synapse/issues/18399)) -+ -+ -+ -+### Updates to locked dependencies -+ -+* Bump actions/add-to-project from 280af8ae1f83a494cfad2cb10f02f6d13529caa9 to 5b1a254a3546aef88e0a7724a77a623fa2e47c36. 
([\#18365](https://github.com/element-hq/synapse/issues/18365)) -+* Bump actions/download-artifact from 4.2.1 to 4.3.0. ([\#18364](https://github.com/element-hq/synapse/issues/18364)) -+* Bump actions/setup-go from 5.4.0 to 5.5.0. ([\#18426](https://github.com/element-hq/synapse/issues/18426)) -+* Bump anyhow from 1.0.97 to 1.0.98. ([\#18336](https://github.com/element-hq/synapse/issues/18336)) -+* Bump packaging from 24.2 to 25.0. ([\#18393](https://github.com/element-hq/synapse/issues/18393)) -+* Bump pillow from 11.1.0 to 11.2.1. ([\#18429](https://github.com/element-hq/synapse/issues/18429)) -+* Bump pydantic from 2.10.3 to 2.11.4. ([\#18394](https://github.com/element-hq/synapse/issues/18394)) -+* Bump pyo3-log from 0.12.2 to 0.12.3. ([\#18317](https://github.com/element-hq/synapse/issues/18317)) -+* Bump pyopenssl from 24.3.0 to 25.0.0. ([\#18315](https://github.com/element-hq/synapse/issues/18315)) -+* Bump sha2 from 0.10.8 to 0.10.9. ([\#18395](https://github.com/element-hq/synapse/issues/18395)) -+* Bump sigstore/cosign-installer from 3.8.1 to 3.8.2. ([\#18366](https://github.com/element-hq/synapse/issues/18366)) -+* Bump softprops/action-gh-release from 1 to 2. ([\#18264](https://github.com/element-hq/synapse/issues/18264)) -+* Bump stefanzweifel/git-auto-commit-action from 5.1.0 to 5.2.0. ([\#18354](https://github.com/element-hq/synapse/issues/18354)) -+* Bump txredisapi from 1.4.10 to 1.4.11. ([\#18392](https://github.com/element-hq/synapse/issues/18392)) -+* Bump types-jsonschema from 4.23.0.20240813 to 4.23.0.20241208. ([\#18305](https://github.com/element-hq/synapse/issues/18305)) -+* Bump types-psycopg2 from 2.9.21.20250121 to 2.9.21.20250318. ([\#18316](https://github.com/element-hq/synapse/issues/18316)) -+ - # Synapse 1.129.0 (2025-05-06) - - No significant changes since 1.129.0rc2. -diff --git a/changelog.d/17578.misc b/changelog.d/17578.misc -deleted file mode 100644 -index 7bf69576cd..0000000000 ---- a/changelog.d/17578.misc -+++ /dev/null -@@ -1 +0,0 @@ --Return specific error code when adding an email address / phone number to account is not supported (MSC4178). -diff --git a/changelog.d/18181.misc b/changelog.d/18181.misc -deleted file mode 100644 -index d9ba2f1dd1..0000000000 ---- a/changelog.d/18181.misc -+++ /dev/null -@@ -1 +0,0 @@ --Stop auto-provisionning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. -diff --git a/changelog.d/18214.feature b/changelog.d/18214.feature -deleted file mode 100644 -index 751cb7d383..0000000000 ---- a/changelog.d/18214.feature -+++ /dev/null -@@ -1 +0,0 @@ --Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. -\ No newline at end of file -diff --git a/changelog.d/18218.doc b/changelog.d/18218.doc -deleted file mode 100644 -index f62da6a0b9..0000000000 ---- a/changelog.d/18218.doc -+++ /dev/null -@@ -1 +0,0 @@ --Improve formatting of the README file. -diff --git a/changelog.d/18237.doc b/changelog.d/18237.doc -deleted file mode 100644 -index 872f7cab7d..0000000000 ---- a/changelog.d/18237.doc -+++ /dev/null -@@ -1 +0,0 @@ --Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider. 
-\ No newline at end of file -diff --git a/changelog.d/18291.docker b/changelog.d/18291.docker -deleted file mode 100644 -index b94c0e80e3..0000000000 ---- a/changelog.d/18291.docker -+++ /dev/null -@@ -1 +0,0 @@ --In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. -diff --git a/changelog.d/18292.docker b/changelog.d/18292.docker -deleted file mode 100644 -index cdb95b369b..0000000000 ---- a/changelog.d/18292.docker -+++ /dev/null -@@ -1 +0,0 @@ --Optimize the build of the workers image. -diff --git a/changelog.d/18293.docker b/changelog.d/18293.docker -deleted file mode 100644 -index df47a68bfe..0000000000 ---- a/changelog.d/18293.docker -+++ /dev/null -@@ -1 +0,0 @@ --In start_for_complement.sh, replace some external program calls with shell builtins. -diff --git a/changelog.d/18295.docker b/changelog.d/18295.docker -deleted file mode 100644 -index 239def1f54..0000000000 ---- a/changelog.d/18295.docker -+++ /dev/null -@@ -1 +0,0 @@ --When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. -diff --git a/changelog.d/18297.misc b/changelog.d/18297.misc -deleted file mode 100644 -index 5032d48174..0000000000 ---- a/changelog.d/18297.misc -+++ /dev/null -@@ -1 +0,0 @@ --Apply file hashing and existing quarantines to media downloaded for URL previews. -diff --git a/changelog.d/18300.feature b/changelog.d/18300.feature -deleted file mode 100644 -index 92bea77556..0000000000 ---- a/changelog.d/18300.feature -+++ /dev/null -@@ -1 +0,0 @@ --Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results. -\ No newline at end of file -diff --git a/changelog.d/18313.misc b/changelog.d/18313.misc -deleted file mode 100644 -index febf3ac06e..0000000000 ---- a/changelog.d/18313.misc -+++ /dev/null -@@ -1 +0,0 @@ --Allow a few admin APIs used by matrix-authentication-service to run on workers. -diff --git a/changelog.d/18320.doc b/changelog.d/18320.doc -deleted file mode 100644 -index d84c279940..0000000000 ---- a/changelog.d/18320.doc -+++ /dev/null -@@ -1 +0,0 @@ --Fix typo in docs about the `push` config option. Contributed by @HarHarLinks. -diff --git a/changelog.d/18330.misc b/changelog.d/18330.misc -deleted file mode 100644 -index dcf341fa34..0000000000 ---- a/changelog.d/18330.misc -+++ /dev/null -@@ -1 +0,0 @@ --Apply `should_drop_federated_event` to federation invites. -diff --git a/changelog.d/18355.feature b/changelog.d/18355.feature -deleted file mode 100644 -index 4813f0a291..0000000000 ---- a/changelog.d/18355.feature -+++ /dev/null -@@ -1 +0,0 @@ --Add support for handling `GET /devices/` on workers. -diff --git a/changelog.d/18360.misc b/changelog.d/18360.misc -deleted file mode 100644 -index e5bf4f536f..0000000000 ---- a/changelog.d/18360.misc -+++ /dev/null -@@ -1 +0,0 @@ --Allow `/rooms/` admin API to be run on workers. -diff --git a/changelog.d/18363.bugfix b/changelog.d/18363.bugfix -deleted file mode 100644 -index bfa336d52f..0000000000 ---- a/changelog.d/18363.bugfix -+++ /dev/null -@@ -1 +0,0 @@ --Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. 
-diff --git a/changelog.d/18367.misc b/changelog.d/18367.misc -deleted file mode 100644 -index 2e8b897fa6..0000000000 ---- a/changelog.d/18367.misc -+++ /dev/null -@@ -1 +0,0 @@ --Minor performance improvements to the notifier. -diff --git a/changelog.d/18369.misc b/changelog.d/18369.misc -deleted file mode 100644 -index f4c0e5f006..0000000000 ---- a/changelog.d/18369.misc -+++ /dev/null -@@ -1 +0,0 @@ --Slight performance increase when using the ratelimiter. -diff --git a/changelog.d/18374.misc b/changelog.d/18374.misc -deleted file mode 100644 -index a8efca68d0..0000000000 ---- a/changelog.d/18374.misc -+++ /dev/null -@@ -1 +0,0 @@ --Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token. -\ No newline at end of file -diff --git a/changelog.d/18375.bugfix b/changelog.d/18375.bugfix -deleted file mode 100644 -index faebe6f046..0000000000 ---- a/changelog.d/18375.bugfix -+++ /dev/null -@@ -1 +0,0 @@ --Pass leave from remote invite rejection down Sliding Sync. -diff --git a/changelog.d/18377.doc b/changelog.d/18377.doc -deleted file mode 100644 -index ceb2b64e5d..0000000000 ---- a/changelog.d/18377.doc -+++ /dev/null -@@ -1 +0,0 @@ --Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers. -diff --git a/changelog.d/18384.doc b/changelog.d/18384.doc -deleted file mode 100644 -index ebcd029639..0000000000 ---- a/changelog.d/18384.doc -+++ /dev/null -@@ -1 +0,0 @@ --Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. -diff --git a/changelog.d/18385.misc b/changelog.d/18385.misc -deleted file mode 100644 -index a8efca68d0..0000000000 ---- a/changelog.d/18385.misc -+++ /dev/null -@@ -1 +0,0 @@ --Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token. -\ No newline at end of file -diff --git a/changelog.d/18390.misc b/changelog.d/18390.misc -deleted file mode 100644 -index e9a08dcfbf..0000000000 ---- a/changelog.d/18390.misc -+++ /dev/null -@@ -1 +0,0 @@ --Fixed test failures when using authlib 1.5.2. -diff --git a/changelog.d/18399.misc b/changelog.d/18399.misc -deleted file mode 100644 -index 847dc9a2b1..0000000000 ---- a/changelog.d/18399.misc -+++ /dev/null -@@ -1 +0,0 @@ --Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both new and fallback logic paths. -diff --git a/debian/changelog b/debian/changelog -index 2c1cb20624..e3eb894851 100644 ---- a/debian/changelog -+++ b/debian/changelog -@@ -1,3 +1,9 @@ -+matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium -+ -+ * New Synapse release 1.130.0rc1. -+ -+ -- Synapse Packaging team <packages@matrix.org> Tue, 13 May 2025 10:44:04 +0100 -+ - matrix-synapse-py3 (1.129.0) stable; urgency=medium - - * New Synapse release 1.129.0. 
-diff --git a/pyproject.toml b/pyproject.toml -index 24ae0db05c..5f80d28344 100644 ---- a/pyproject.toml -+++ b/pyproject.toml -@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" - - [tool.poetry] - name = "matrix-synapse" --version = "1.129.0" -+version = "1.130.0rc1" - description = "Homeserver for the Matrix decentralised comms protocol" - authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] - license = "AGPL-3.0-or-later" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch b/packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch deleted file mode 100644
index dc863db..0000000
--- a/packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch
+++ /dev/null
@@ -1,43 +0,0 @@ -From 99c15f4630a7c9983c1b134505eaab703c138ea9 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erik@matrix.org> -Date: Tue, 13 May 2025 10:54:23 +0100 -Subject: [PATCH 44/74] Fix up changelog - ---- - CHANGES.md | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/CHANGES.md b/CHANGES.md -index 235d65c746..a0a9d2f064 100644 ---- a/CHANGES.md -+++ b/CHANGES.md -@@ -8,14 +8,14 @@ - - ### Bugfixes - --- Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363)) -+- Fix a longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363)) - - Pass leave from remote invite rejection down Sliding Sync. ([\#18375](https://github.com/element-hq/synapse/issues/18375)) - - ### Updates to the Docker image - --- In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291)) -+- In `configure_workers_and_start.py`, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291)) - - Optimize the build of the workers image. ([\#18292](https://github.com/element-hq/synapse/issues/18292)) --- In start_for_complement.sh, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293)) -+- In `start_for_complement.sh`, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293)) - - When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. ([\#18295](https://github.com/element-hq/synapse/issues/18295)) - - ### Improved Documentation -@@ -28,7 +28,7 @@ - - ### Internal Changes - --- Return specific error code when adding an email address / phone number to account is not supported (MSC4178). ([\#17578](https://github.com/element-hq/synapse/issues/17578)) -+- Return specific error code when adding an email address / phone number to account is not supported ([MSC4178](https://github.com/matrix-org/matrix-spec-proposals/pull/4178)). ([\#17578](https://github.com/element-hq/synapse/issues/17578)) - - Stop auto-provisionning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. ([\#18181](https://github.com/element-hq/synapse/issues/18181)) - - Apply file hashing and existing quarantines to media downloaded for URL previews. ([\#18297](https://github.com/element-hq/synapse/issues/18297)) - - Allow a few admin APIs used by matrix-authentication-service to run on workers. ([\#18313](https://github.com/element-hq/synapse/issues/18313)) --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch b/packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch deleted file mode 100644
index ee1cfc7..0000000
--- a/packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch
+++ /dev/null
@@ -1,63 +0,0 @@ -From f5ed52c1e24b5649d7d81dd9690bb606e387961b Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Thu, 15 May 2025 12:43:24 +0100 -Subject: [PATCH 53/74] Move index creation to background update (#18439) - -Follow on from #18375. This prevents blocking startup on creating the -index, which can take a while - ---------- - -Co-authored-by: Devon Hudson <devon.dmytro@gmail.com> ---- - changelog.d/18439.bugfix | 1 + - synapse/storage/databases/main/sliding_sync.py | 8 ++++++++ - ...snapshot_idx.sql => 04_ss_membership_snapshot_idx.sql} | 4 ++-- - 3 files changed, 11 insertions(+), 2 deletions(-) - create mode 100644 changelog.d/18439.bugfix - rename synapse/storage/schema/main/delta/92/{03_ss_membership_snapshot_idx.sql => 04_ss_membership_snapshot_idx.sql} (73%) - -diff --git a/changelog.d/18439.bugfix b/changelog.d/18439.bugfix -new file mode 100644 -index 0000000000..5ee9bda474 ---- /dev/null -+++ b/changelog.d/18439.bugfix -@@ -0,0 +1 @@ -+Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1. -diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py -index a287fd2a3f..6a62b11d1e 100644 ---- a/synapse/storage/databases/main/sliding_sync.py -+++ b/synapse/storage/databases/main/sliding_sync.py -@@ -68,6 +68,14 @@ class SlidingSyncStore(SQLBaseStore): - columns=("membership_event_id",), - ) - -+ self.db_pool.updates.register_background_index_update( -+ update_name="sliding_sync_membership_snapshots_user_id_stream_ordering", -+ index_name="sliding_sync_membership_snapshots_user_id_stream_ordering", -+ table="sliding_sync_membership_snapshots", -+ columns=("user_id", "event_stream_ordering"), -+ replaces_index="sliding_sync_membership_snapshots_user_id", -+ ) -+ - async def get_latest_bump_stamp_for_room( - self, - room_id: str, -diff --git a/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql b/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql -similarity index 73% -rename from synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql -rename to synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql -index c694203f95..6f5b7cb06e 100644 ---- a/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql -+++ b/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql -@@ -12,5 +12,5 @@ - -- <https://www.gnu.org/licenses/agpl-3.0.html>. - - -- So we can fetch all rooms for a given user sorted by stream order --DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id; --CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id, event_stream_ordering); -+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES -+ (9204, 'sliding_sync_membership_snapshots_user_id_stream_ordering', '{}'); --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch b/packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch deleted file mode 100644
index 7f666b4..0000000
--- a/packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch
+++ /dev/null
@@ -1,342 +0,0 @@ -From 67920c0aca6bb23f76390fa4827ce2e6e1889547 Mon Sep 17 00:00:00 2001 -From: Erik Johnston <erikj@element.io> -Date: Mon, 19 May 2025 13:36:30 +0100 -Subject: [PATCH 63/74] Fix up the topological ordering for events above - `MAX_DEPTH` (#18447) - -Synapse previously did not correctly cap the max depth of an event to -the max canonical json int. This can cause ordering issues for any -events that were sent locally at the time. - -This background update goes and correctly caps the topological ordering -to the new `MAX_DEPTH`. - -c.f. GHSA-v56r-hwv5-mxg6 - ---------- - -Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> ---- - changelog.d/18447.bugfix | 1 + - .../databases/main/events_bg_updates.py | 82 ++++++++- - .../main/delta/92/05_fixup_max_depth_cap.sql | 17 ++ - synapse/types/storage/__init__.py | 2 + - tests/storage/test_events_bg_updates.py | 157 ++++++++++++++++++ - 5 files changed, 258 insertions(+), 1 deletion(-) - create mode 100644 changelog.d/18447.bugfix - create mode 100644 synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql - create mode 100644 tests/storage/test_events_bg_updates.py - -diff --git a/changelog.d/18447.bugfix b/changelog.d/18447.bugfix -new file mode 100644 -index 0000000000..578be1ffe9 ---- /dev/null -+++ b/changelog.d/18447.bugfix -@@ -0,0 +1 @@ -+Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). -diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py -index 4b0bdd79c6..5c83a9f779 100644 ---- a/synapse/storage/databases/main/events_bg_updates.py -+++ b/synapse/storage/databases/main/events_bg_updates.py -@@ -24,7 +24,12 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast - - import attr - --from synapse.api.constants import EventContentFields, Membership, RelationTypes -+from synapse.api.constants import ( -+ MAX_DEPTH, -+ EventContentFields, -+ Membership, -+ RelationTypes, -+) - from synapse.api.room_versions import KNOWN_ROOM_VERSIONS - from synapse.events import EventBase, make_event_from_dict - from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause -@@ -311,6 +316,10 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS - self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update, - ) - -+ self.db_pool.updates.register_background_update_handler( -+ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, self.fixup_max_depth_cap_bg_update -+ ) -+ - # We want this to run on the main database at startup before we start processing - # events. - # -@@ -2547,6 +2556,77 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS - - return num_rows - -+ async def fixup_max_depth_cap_bg_update( -+ self, progress: JsonDict, batch_size: int -+ ) -> int: -+ """Fixes the topological ordering for events that have a depth greater -+ than MAX_DEPTH. This should fix /messages ordering oddities.""" -+ -+ room_id_bound = progress.get("room_id", "") -+ -+ def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> Tuple[bool, int]: -+ txn.execute( -+ """ -+ SELECT room_id, room_version FROM rooms -+ WHERE room_id > ? -+ ORDER BY room_id -+ LIMIT ? -+ """, -+ (room_id_bound, batch_size), -+ ) -+ -+ # Find the next room ID to process, with a relevant room version. 
-+ room_ids: List[str] = [] -+ max_room_id: Optional[str] = None -+ for room_id, room_version_str in txn: -+ max_room_id = room_id -+ -+ # We only want to process rooms with a known room version that -+ # has strict canonical json validation enabled. -+ room_version = KNOWN_ROOM_VERSIONS.get(room_version_str) -+ if room_version and room_version.strict_canonicaljson: -+ room_ids.append(room_id) -+ -+ if max_room_id is None: -+ # The query did not return any rooms, so we are done. -+ return True, 0 -+ -+ # Update the progress to the last room ID we pulled from the DB, -+ # this ensures we always make progress. -+ self.db_pool.updates._background_update_progress_txn( -+ txn, -+ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, -+ progress={"room_id": max_room_id}, -+ ) -+ -+ if not room_ids: -+ # There were no rooms in this batch that required the fix. -+ return False, 0 -+ -+ clause, list_args = make_in_list_sql_clause( -+ self.database_engine, "room_id", room_ids -+ ) -+ sql = f""" -+ UPDATE events SET topological_ordering = ? -+ WHERE topological_ordering > ? AND {clause} -+ """ -+ args = [MAX_DEPTH, MAX_DEPTH] -+ args.extend(list_args) -+ txn.execute(sql, args) -+ -+ return False, len(room_ids) -+ -+ done, num_rooms = await self.db_pool.runInteraction( -+ "redo_max_depth_bg_update", redo_max_depth_bg_update_txn -+ ) -+ -+ if done: -+ await self.db_pool.updates._end_background_update( -+ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP -+ ) -+ -+ return num_rooms -+ - - def _resolve_stale_data_in_sliding_sync_tables( - txn: LoggingTransaction, -diff --git a/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql b/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql -new file mode 100644 -index 0000000000..c1ebf8b58b ---- /dev/null -+++ b/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql -@@ -0,0 +1,17 @@ -+-- -+-- This file is licensed under the Affero General Public License (AGPL) version 3. -+-- -+-- Copyright (C) 2025 New Vector, Ltd -+-- -+-- This program is free software: you can redistribute it and/or modify -+-- it under the terms of the GNU Affero General Public License as -+-- published by the Free Software Foundation, either version 3 of the -+-- License, or (at your option) any later version. -+-- -+-- See the GNU Affero General Public License for more details: -+-- <https://www.gnu.org/licenses/agpl-3.0.html>. -+ -+-- Background update that fixes any events with a topological ordering above the -+-- MAX_DEPTH value. -+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES -+ (9205, 'fixup_max_depth_cap', '{}'); -diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py -index e03ff7ffc8..378a15e038 100644 ---- a/synapse/types/storage/__init__.py -+++ b/synapse/types/storage/__init__.py -@@ -52,3 +52,5 @@ class _BackgroundUpdates: - MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = ( - "mark_unreferenced_state_groups_for_deletion_bg_update" - ) -+ -+ FIXUP_MAX_DEPTH_CAP = "fixup_max_depth_cap" -diff --git a/tests/storage/test_events_bg_updates.py b/tests/storage/test_events_bg_updates.py -new file mode 100644 -index 0000000000..ecdf413e3b ---- /dev/null -+++ b/tests/storage/test_events_bg_updates.py -@@ -0,0 +1,157 @@ -+# -+# This file is licensed under the Affero General Public License (AGPL) version 3. 
-+# -+# Copyright (C) 2025 New Vector, Ltd -+# -+# This program is free software: you can redistribute it and/or modify -+# it under the terms of the GNU Affero General Public License as -+# published by the Free Software Foundation, either version 3 of the -+# License, or (at your option) any later version. -+# -+# See the GNU Affero General Public License for more details: -+# <https://www.gnu.org/licenses/agpl-3.0.html>. -+# -+# -+ -+from typing import Dict -+ -+from twisted.test.proto_helpers import MemoryReactor -+ -+from synapse.api.constants import MAX_DEPTH -+from synapse.api.room_versions import RoomVersion, RoomVersions -+from synapse.server import HomeServer -+from synapse.util import Clock -+ -+from tests.unittest import HomeserverTestCase -+ -+ -+class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase): -+ """Test the background update that caps topological_ordering at MAX_DEPTH.""" -+ -+ def prepare( -+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer -+ ) -> None: -+ self.store = self.hs.get_datastores().main -+ self.db_pool = self.store.db_pool -+ -+ self.room_id = "!testroom:example.com" -+ -+ # Reinsert the background update as it was already run at the start of -+ # the test. -+ self.get_success( -+ self.db_pool.simple_insert( -+ table="background_updates", -+ values={ -+ "update_name": "fixup_max_depth_cap", -+ "progress_json": "{}", -+ }, -+ ) -+ ) -+ -+ def create_room(self, room_version: RoomVersion) -> Dict[str, int]: -+ """Create a room with a known room version and insert events. -+ -+ Returns the set of event IDs that exceed MAX_DEPTH and -+ their depth. -+ """ -+ -+ # Create a room with a specific room version -+ self.get_success( -+ self.db_pool.simple_insert( -+ table="rooms", -+ values={ -+ "room_id": self.room_id, -+ "room_version": room_version.identifier, -+ }, -+ ) -+ ) -+ -+ # Insert events with some depths exceeding MAX_DEPTH -+ event_id_to_depth: Dict[str, int] = {} -+ for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5): -+ event_id = f"$event{depth}:example.com" -+ event_id_to_depth[event_id] = depth -+ -+ self.get_success( -+ self.db_pool.simple_insert( -+ table="events", -+ values={ -+ "event_id": event_id, -+ "room_id": self.room_id, -+ "topological_ordering": depth, -+ "depth": depth, -+ "type": "m.test", -+ "sender": "@user:test", -+ "processed": True, -+ "outlier": False, -+ }, -+ ) -+ ) -+ -+ return event_id_to_depth -+ -+ def test_fixup_max_depth_cap_bg_update(self) -> None: -+ """Test that the background update correctly caps topological_ordering -+ at MAX_DEPTH.""" -+ -+ event_id_to_depth = self.create_room(RoomVersions.V6) -+ -+ # Run the background update -+ progress = {"room_id": ""} -+ batch_size = 10 -+ num_rooms = self.get_success( -+ self.store.fixup_max_depth_cap_bg_update(progress, batch_size) -+ ) -+ -+ # Verify the number of rooms processed -+ self.assertEqual(num_rooms, 1) -+ -+ # Verify that the topological_ordering of events has been capped at -+ # MAX_DEPTH -+ rows = self.get_success( -+ self.db_pool.simple_select_list( -+ table="events", -+ keyvalues={"room_id": self.room_id}, -+ retcols=["event_id", "topological_ordering"], -+ ) -+ ) -+ -+ for event_id, topological_ordering in rows: -+ if event_id_to_depth[event_id] >= MAX_DEPTH: -+ # Events with a depth greater than or equal to MAX_DEPTH should -+ # be capped at MAX_DEPTH. -+ self.assertEqual(topological_ordering, MAX_DEPTH) -+ else: -+ # Events with a depth less than MAX_DEPTH should remain -+ # unchanged. 
-+ self.assertEqual(topological_ordering, event_id_to_depth[event_id]) -+ -+ def test_fixup_max_depth_cap_bg_update_old_room_version(self) -> None: -+ """Test that the background update does not cap topological_ordering for -+ rooms with old room versions.""" -+ -+ event_id_to_depth = self.create_room(RoomVersions.V5) -+ -+ # Run the background update -+ progress = {"room_id": ""} -+ batch_size = 10 -+ num_rooms = self.get_success( -+ self.store.fixup_max_depth_cap_bg_update(progress, batch_size) -+ ) -+ -+ # Verify the number of rooms processed -+ self.assertEqual(num_rooms, 0) -+ -+ # Verify that the topological_ordering of events has been capped at -+ # MAX_DEPTH -+ rows = self.get_success( -+ self.db_pool.simple_select_list( -+ table="events", -+ keyvalues={"room_id": self.room_id}, -+ retcols=["event_id", "topological_ordering"], -+ ) -+ ) -+ -+ # Assert that the topological_ordering of events has not been changed -+ # from their depth. -+ self.assertDictEqual(event_id_to_depth, dict(rows)) --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0068-1.130.0.patch b/packages/overlays/matrix-synapse/patches/0068-1.130.0.patch deleted file mode 100644
index 4c282de..0000000
--- a/packages/overlays/matrix-synapse/patches/0068-1.130.0.patch
+++ /dev/null
@@ -1,77 +0,0 @@ -From a36f3a6d875ce92e3cf6f3659f99ad71f8a0c069 Mon Sep 17 00:00:00 2001 -From: Devon Hudson <devonhudson@librem.one> -Date: Tue, 20 May 2025 08:35:23 -0600 -Subject: [PATCH 68/74] 1.130.0 - ---- - CHANGES.md | 10 ++++++++++ - changelog.d/18439.bugfix | 1 - - changelog.d/18447.bugfix | 1 - - debian/changelog | 6 ++++++ - pyproject.toml | 2 +- - 5 files changed, 17 insertions(+), 3 deletions(-) - delete mode 100644 changelog.d/18439.bugfix - delete mode 100644 changelog.d/18447.bugfix - -diff --git a/CHANGES.md b/CHANGES.md -index a0a9d2f064..6837ad6bef 100644 ---- a/CHANGES.md -+++ b/CHANGES.md -@@ -1,3 +1,13 @@ -+# Synapse 1.130.0 (2025-05-20) -+ -+### Bugfixes -+ -+- Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439)) -+- Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447)) -+ -+ -+ -+ - # Synapse 1.130.0rc1 (2025-05-13) - - ### Features -diff --git a/changelog.d/18439.bugfix b/changelog.d/18439.bugfix -deleted file mode 100644 -index 5ee9bda474..0000000000 ---- a/changelog.d/18439.bugfix -+++ /dev/null -@@ -1 +0,0 @@ --Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1. -diff --git a/changelog.d/18447.bugfix b/changelog.d/18447.bugfix -deleted file mode 100644 -index 578be1ffe9..0000000000 ---- a/changelog.d/18447.bugfix -+++ /dev/null -@@ -1 +0,0 @@ --Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). -diff --git a/debian/changelog b/debian/changelog -index e3eb894851..56776a7d86 100644 ---- a/debian/changelog -+++ b/debian/changelog -@@ -1,3 +1,9 @@ -+matrix-synapse-py3 (1.130.0) stable; urgency=medium -+ -+ * New Synapse release 1.130.0. -+ -+ -- Synapse Packaging team <packages@matrix.org> Tue, 20 May 2025 08:34:13 -0600 -+ - matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium - - * New Synapse release 1.130.0rc1. -diff --git a/pyproject.toml b/pyproject.toml -index 5f80d28344..7bc9fd4130 100644 ---- a/pyproject.toml -+++ b/pyproject.toml -@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" - - [tool.poetry] - name = "matrix-synapse" --version = "1.130.0rc1" -+version = "1.130.0" - description = "Homeserver for the Matrix decentralised comms protocol" - authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] - license = "AGPL-3.0-or-later" --- -2.49.0 - diff --git a/packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch b/packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch deleted file mode 100644
index 4619fab..0000000
--- a/packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch
+++ /dev/null
@@ -1,25 +0,0 @@ -From f92c6455efbecaba1ddb1595e597aec0d7e4fb42 Mon Sep 17 00:00:00 2001 -From: Devon Hudson <devonhudson@librem.one> -Date: Tue, 20 May 2025 08:46:37 -0600 -Subject: [PATCH 69/74] Tweak changelog - ---- - CHANGES.md | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/CHANGES.md b/CHANGES.md -index 6837ad6bef..d29027bbfb 100644 ---- a/CHANGES.md -+++ b/CHANGES.md -@@ -2,7 +2,7 @@ - - ### Bugfixes - --- Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439)) -+- Fix startup being blocked on creating a new index that was introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439)) - - Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447)) - - --- -2.49.0 -