diff --git a/flake.nix b/flake.nix
index f1cf8e9..fb7c6da 100755
--- a/flake.nix
+++ b/flake.nix
@@ -147,7 +147,12 @@
];
nixpkgs.overlays = [
(final: prev: {
- matrix-synapse-unwrapped = inputs.nixpkgs-master.legacyPackages.${pkgs.stdenv.hostPlatform.system}.matrix-synapse-unwrapped;
+ matrix-synapse-unwrapped = inputs.nixpkgs-master.legacyPackages.${pkgs.stdenv.hostPlatform.system}.matrix-synapse-unwrapped.overrideAttrs (oldAttrs: {
+ # apply patches from ./packages/overlays/matrix-synapse/patches
+ patches = (oldAttrs.patches or [ ]) ++ builtins.map (
+ path: ./packages/overlays/matrix-synapse/patches/${path}
+ ) (builtins.attrNames (builtins.readDir ./packages/overlays/matrix-synapse/patches));
+ });
#draupnir = inputs.nixpkgs-master.legacyPackages.${pkgs.stdenv.hostPlatform.system}.draupnir;
draupnir = inputs.nixpkgs-DraupnirPkg.legacyPackages.${pkgs.stdenv.hostPlatform.system}.draupnir;
# keydb = inputs.nixpkgs-keydb.legacyPackages.${pkgs.stdenv.hostPlatform.system}.keydb;
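A quick way to sanity-check the expression above — a minimal sketch, assuming the packages/overlays/matrix-synapse/patches/ layout introduced by this commit — is to evaluate it in nix repl from the repo root:

    nix-repl> map (p: ./packages/overlays/matrix-synapse/patches + "/${p}")
                (builtins.attrNames (builtins.readDir ./packages/overlays/matrix-synapse/patches))
    [ /.../packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch ... ]

The parentheses around the builtins.attrNames call matter: without them, map is applied to three arguments and evaluation fails.
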
diff --git a/host/Rory-nginx/services/matrix/synapse/ratelimits.nix b/host/Rory-nginx/services/matrix/synapse/ratelimits.nix
index ffce1cc..d6d4a94 100644
--- a/host/Rory-nginx/services/matrix/synapse/ratelimits.nix
+++ b/host/Rory-nginx/services/matrix/synapse/ratelimits.nix
@@ -53,7 +53,7 @@
reject_limit = 1000;
concurrent = 100;
};
- federation_rr_transactions_per_room_per_second = 1;
+ federation_rr_transactions_per_room_per_second = 1000;
# media
rc_media_create = {
@@ -82,4 +82,16 @@
per_second = 1000;
burst_count = 1000;
};
+
+ # presence
+ rc_presence.per_user = {
+ per_second = 1;
+ burst_count = 2;
+ };
+
+ # delayed events
+ rc_delayed_event_mgmt = {
+ per_second = 1000;
+ burst_count = 1000;
+ };
}
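For context on the numbers above: Synapse's ratelimiters (see synapse/api/ratelimiting.py in patch 0014 below) are leaky buckets — burst_count caps how many actions fit in the bucket, and per_second is the rate at which it drains. A rough model of the rc_presence.per_user values, as a sketch only (Synapse additionally floors the running count at zero, omitted here):

    let
      rc = { per_second = 1; burst_count = 2; };
      # would one more action be allowed, given `count` prior actions and
      # `elapsed` seconds since the bucket was last empty?
      allowed = count: elapsed:
        (count - elapsed * rc.per_second) + 1 <= rc.burst_count;
    in {
      thirdUpdateImmediately = allowed 2 0;    # false: the burst of 2 is exhausted
      thirdUpdateAfterOneSecond = allowed 2 1; # true: one token has drained out
    }
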
diff --git a/host/Rory-ovh/services/matrix/synapse/ratelimits.nix b/host/Rory-ovh/services/matrix/synapse/ratelimits.nix
index ffce1cc..85f51f2 100644
--- a/host/Rory-ovh/services/matrix/synapse/ratelimits.nix
+++ b/host/Rory-ovh/services/matrix/synapse/ratelimits.nix
@@ -53,7 +53,7 @@
reject_limit = 1000;
concurrent = 100;
};
- federation_rr_transactions_per_room_per_second = 1;
+ federation_rr_transactions_per_room_per_second = 100;
# media
rc_media_create = {
@@ -82,4 +82,16 @@
per_second = 1000;
burst_count = 1000;
};
+
+ # presence
+ rc_presence.per_user = {
+ per_second = 1;
+ burst_count = 2;
+ };
+
+ # delayed events
+ rc_delayed_event_mgmt = {
+ per_second = 1000;
+ burst_count = 1000;
+ };
}
diff --git a/packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch b/packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch
new file mode 100644
index 0000000..a89c307
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0001-Move-GET-devices-off-main-process-18355.patch
@@ -0,0 +1,98 @@
+From 33824495ba520f57eae3687db847175b40f71d73 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Fri, 25 Apr 2025 15:08:33 +0100
+Subject: [PATCH 01/74] Move GET /devices/ off main process (#18355)
+
+We can't move PUT/DELETE as they do need to happen on main process (due
+to notification of device changes).
+
+---------
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18355.feature | 1 +
+ docs/workers.md | 1 +
+ synapse/rest/client/devices.py | 20 ++++++++++++++++++--
+ 3 files changed, 20 insertions(+), 2 deletions(-)
+ create mode 100644 changelog.d/18355.feature
+
+diff --git a/changelog.d/18355.feature b/changelog.d/18355.feature
+new file mode 100644
+index 0000000000..4813f0a291
+--- /dev/null
++++ b/changelog.d/18355.feature
+@@ -0,0 +1 @@
++Add support for handling `GET /devices/` on workers.
+diff --git a/docs/workers.md b/docs/workers.md
+index 08ee493da9..def902d24c 100644
+--- a/docs/workers.md
++++ b/docs/workers.md
+@@ -280,6 +280,7 @@ Additionally, the following REST endpoints can be handled for GET requests:
+
+ ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
+ ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
++ ^/_matrix/client/(api/v1|r0|v3|unstable)/devices/
+
+ # Account data requests
+ ^/_matrix/client/(r0|v3|unstable)/.*/tags
+diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
+index 4607b23494..0b075cc2f2 100644
+--- a/synapse/rest/client/devices.py
++++ b/synapse/rest/client/devices.py
+@@ -143,11 +143,11 @@ class DeviceRestServlet(RestServlet):
+ self.hs = hs
+ self.auth = hs.get_auth()
+ handler = hs.get_device_handler()
+- assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
+ self.auth_handler = hs.get_auth_handler()
+ self._msc3852_enabled = hs.config.experimental.msc3852_enabled
+ self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
++ self._is_main_process = hs.config.worker.worker_app is None
+
+ async def on_GET(
+ self, request: SynapseRequest, device_id: str
+@@ -179,6 +179,14 @@ class DeviceRestServlet(RestServlet):
+ async def on_DELETE(
+ self, request: SynapseRequest, device_id: str
+ ) -> Tuple[int, JsonDict]:
++ # Can only be run on main process, as changes to device lists must
++ # happen on main.
++ if not self._is_main_process:
++ error_message = "DELETE on /devices/ must be routed to main process"
++ logger.error(error_message)
++ raise SynapseError(500, error_message)
++ assert isinstance(self.device_handler, DeviceHandler)
++
+ requester = await self.auth.get_user_by_req(request)
+
+ try:
+@@ -223,6 +231,14 @@ class DeviceRestServlet(RestServlet):
+ async def on_PUT(
+ self, request: SynapseRequest, device_id: str
+ ) -> Tuple[int, JsonDict]:
++ # Can only be run on main process, as changes to device lists must
++ # happen on main.
++ if not self._is_main_process:
++ error_message = "PUT on /devices/ must be routed to main process"
++ logger.error(error_message)
++ raise SynapseError(500, error_message)
++ assert isinstance(self.device_handler, DeviceHandler)
++
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+
+ body = parse_and_validate_json_object_from_request(request, self.PutBody)
+@@ -585,9 +601,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ ):
+ DeleteDevicesRestServlet(hs).register(http_server)
+ DevicesRestServlet(hs).register(http_server)
++ DeviceRestServlet(hs).register(http_server)
+
+ if hs.config.worker.worker_app is None:
+- DeviceRestServlet(hs).register(http_server)
+ if hs.config.experimental.msc2697_enabled:
+ DehydratedDeviceServlet(hs).register(http_server)
+ ClaimDehydratedDeviceServlet(hs).register(http_server)
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch b/packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch
new file mode 100644
index 0000000..7cb9bd1
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0002-Allow-rooms-admin-API-to-be-on-workers-18360.patch
@@ -0,0 +1,80 @@
+From 5b89c9264380da8f9cc55460f8215758fe570010 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Fri, 25 Apr 2025 15:18:22 +0100
+Subject: [PATCH 02/74] Allow /rooms/ admin API to be on workers (#18360)
+
+Tested by https://github.com/matrix-org/sytest/pull/1400
+---
+ changelog.d/18360.misc | 1 +
+ docs/workers.md | 1 +
+ synapse/app/generic_worker.py | 3 ++-
+ synapse/rest/admin/__init__.py | 5 +++--
+ 4 files changed, 7 insertions(+), 3 deletions(-)
+ create mode 100644 changelog.d/18360.misc
+
+diff --git a/changelog.d/18360.misc b/changelog.d/18360.misc
+new file mode 100644
+index 0000000000..e5bf4f536f
+--- /dev/null
++++ b/changelog.d/18360.misc
+@@ -0,0 +1 @@
++Allow `/rooms/` admin API to be run on workers.
+diff --git a/docs/workers.md b/docs/workers.md
+index def902d24c..9ebcc886b1 100644
+--- a/docs/workers.md
++++ b/docs/workers.md
+@@ -249,6 +249,7 @@ information.
+ ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
+ ^/_matrix/client/(r0|v3|unstable)/capabilities$
+ ^/_matrix/client/(r0|v3|unstable)/notifications$
++ ^/_synapse/admin/v1/rooms/
+
+ # Encryption requests
+ ^/_matrix/client/(r0|v3|unstable)/keys/query$
+diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
+index a528c3890d..e4120ed424 100644
+--- a/synapse/app/generic_worker.py
++++ b/synapse/app/generic_worker.py
+@@ -52,7 +52,7 @@ from synapse.logging.context import LoggingContext
+ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
+ from synapse.rest import ClientRestResource
+-from synapse.rest.admin import register_servlets_for_media_repo
++from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo
+ from synapse.rest.health import HealthResource
+ from synapse.rest.key.v2 import KeyResource
+ from synapse.rest.synapse.client import build_synapse_client_resource_tree
+@@ -190,6 +190,7 @@ class GenericWorkerServer(HomeServer):
+
+ resources.update(build_synapse_client_resource_tree(self))
+ resources["/.well-known"] = well_known_resource(self)
++ resources["/_synapse/admin"] = AdminRestResource(self)
+
+ elif name == "federation":
+ resources[FEDERATION_PREFIX] = TransportLayerServer(self)
+diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
+index f3c99663e8..5977ded4a0 100644
+--- a/synapse/rest/admin/__init__.py
++++ b/synapse/rest/admin/__init__.py
+@@ -275,7 +275,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ """
+ Register all the admin servlets.
+ """
+- # Admin servlets aren't registered on workers.
++ RoomRestServlet(hs).register(http_server)
++
++ # Admin servlets below may not work on workers.
+ if hs.config.worker.worker_app is not None:
+ return
+
+@@ -283,7 +285,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ BlockRoomRestServlet(hs).register(http_server)
+ ListRoomRestServlet(hs).register(http_server)
+ RoomStateRestServlet(hs).register(http_server)
+- RoomRestServlet(hs).register(http_server)
+ RoomRestV2Servlet(hs).register(http_server)
+ RoomMembersRestServlet(hs).register(http_server)
+ DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server)
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch b/packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch
new file mode 100644
index 0000000..0507a50
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0003-Bump-sigstore-cosign-installer-from-3.8.1-to-3.8.2-1.patch
@@ -0,0 +1,26 @@
+From 1482ad1917ef5e022b2d2238d30be74f50b47953 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:05:43 +0100
+Subject: [PATCH 03/74] Bump sigstore/cosign-installer from 3.8.1 to 3.8.2
+ (#18366)
+
+---
+ .github/workflows/docker.yml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
+index 052dcf800b..c617753c7a 100644
+--- a/.github/workflows/docker.yml
++++ b/.github/workflows/docker.yml
+@@ -30,7 +30,7 @@ jobs:
+ run: docker buildx inspect
+
+ - name: Install Cosign
+- uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
++ uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
+
+ - name: Checkout repository
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch b/packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch
new file mode 100644
index 0000000..14d8061
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0004-Bump-actions-add-to-project-from-280af8ae1f83a494cfa.patch
@@ -0,0 +1,27 @@
+From 2ff977a6c39caa24f35c58f2f5acd948dbdf122b Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:05:55 +0100
+Subject: [PATCH 04/74] Bump actions/add-to-project from
+ 280af8ae1f83a494cfad2cb10f02f6d13529caa9 to
+ 5b1a254a3546aef88e0a7724a77a623fa2e47c36 (#18365)
+
+---
+ .github/workflows/triage_labelled.yml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
+index feab5906e0..e506be393f 100644
+--- a/.github/workflows/triage_labelled.yml
++++ b/.github/workflows/triage_labelled.yml
+@@ -11,7 +11,7 @@ jobs:
+ if: >
+ contains(github.event.issue.labels.*.name, 'X-Needs-Info')
+ steps:
+- - uses: actions/add-to-project@280af8ae1f83a494cfad2cb10f02f6d13529caa9 # main (v1.0.2 + 10 commits)
++ - uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
+ id: add_project
+ with:
+ project-url: "https://github.com/orgs/matrix-org/projects/67"
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch b/packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch
new file mode 100644
index 0000000..e9d974e
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0005-Bump-actions-download-artifact-from-4.2.1-to-4.3.0-1.patch
@@ -0,0 +1,26 @@
+From a87981f673fe944690202cc4067a02f0c666eee4 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:06:13 +0100
+Subject: [PATCH 05/74] Bump actions/download-artifact from 4.2.1 to 4.3.0
+ (#18364)
+
+---
+ .github/workflows/release-artifacts.yml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
+index 573264229f..e0b8f2faf4 100644
+--- a/.github/workflows/release-artifacts.yml
++++ b/.github/workflows/release-artifacts.yml
+@@ -203,7 +203,7 @@ jobs:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Download all workflow run artifacts
+- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
++ uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+ - name: Build a tarball for the debs
+ # We need to merge all the debs uploads into one folder, then compress
+ # that.
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch b/packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch
new file mode 100644
index 0000000..ce2b729
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0006-Bump-stefanzweifel-git-auto-commit-action-from-5.1.0.patch
@@ -0,0 +1,25 @@
+From 4c958c679a9c20930adfa25e64fc237fbf526591 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:06:26 +0100
+Subject: [PATCH 06/74] Bump stefanzweifel/git-auto-commit-action from 5.1.0 to
+ 5.2.0 (#18354)
+
+---
+ .github/workflows/fix_lint.yaml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml
+index fe699c1b2f..923e96a624 100644
+--- a/.github/workflows/fix_lint.yaml
++++ b/.github/workflows/fix_lint.yaml
+@@ -44,6 +44,6 @@ jobs:
+ - run: cargo fmt
+ continue-on-error: true
+
+- - uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0
++ - uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0
+ with:
+ commit_message: "Attempt to fix linting"
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch b/packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch
new file mode 100644
index 0000000..7827e2a
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0007-Bump-anyhow-from-1.0.97-to-1.0.98-18336.patch
@@ -0,0 +1,28 @@
+From 39e17856a37570bda2fa912c6751e31bad6f970b Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:06:36 +0100
+Subject: [PATCH 07/74] Bump anyhow from 1.0.97 to 1.0.98 (#18336)
+
+---
+ Cargo.lock | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Cargo.lock b/Cargo.lock
+index 1b17e9910a..e1c381e273 100644
+--- a/Cargo.lock
++++ b/Cargo.lock
+@@ -13,9 +13,9 @@ dependencies = [
+
+ [[package]]
+ name = "anyhow"
+-version = "1.0.97"
++version = "1.0.98"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+-checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f"
++checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+
+ [[package]]
+ name = "arc-swap"
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch b/packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch
new file mode 100644
index 0000000..5c5ab88
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0008-Bump-pyo3-log-from-0.12.2-to-0.12.3-18317.patch
@@ -0,0 +1,28 @@
+From 2ef782462011044718b0b3848f0cd33e5b2e1827 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:07:06 +0100
+Subject: [PATCH 08/74] Bump pyo3-log from 0.12.2 to 0.12.3 (#18317)
+
+---
+ Cargo.lock | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Cargo.lock b/Cargo.lock
+index e1c381e273..822eb2cdba 100644
+--- a/Cargo.lock
++++ b/Cargo.lock
+@@ -316,9 +316,9 @@ dependencies = [
+
+ [[package]]
+ name = "pyo3-log"
+-version = "0.12.2"
++version = "0.12.3"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+-checksum = "4b78e4983ba15bc62833a0e0941d965bc03690163f1127864f1408db25063466"
++checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
+ dependencies = [
+ "arc-swap",
+ "log",
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch b/packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch
new file mode 100644
index 0000000..1416c13
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0009-Bump-types-psycopg2-from-2.9.21.20250121-to-2.9.21.2.patch
@@ -0,0 +1,92 @@
+From b0795d0cb670b2e8e66839e729cce42eb681832e Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:07:15 +0100
+Subject: [PATCH 09/74] Bump types-psycopg2 from 2.9.21.20250121 to
+ 2.9.21.20250318 (#18316)
+
+Bumps [types-psycopg2](https://github.com/python/typeshed) from
+2.9.21.20250121 to 2.9.21.20250318.
+<details>
+<summary>Commits</summary>
+<ul>
+<li>See full diff in <a
+href="https://github.com/python/typeshed/commits">compare view</a></li>
+</ul>
+</details>
+<br />
+
+
+[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
+
+Dependabot will resolve any conflicts with this PR as long as you don't
+alter it yourself. You can also trigger a rebase manually by commenting
+`@dependabot rebase`.
+
+[//]: # (dependabot-automerge-start)
+[//]: # (dependabot-automerge-end)
+
+---
+
+<details>
+<summary>Dependabot commands and options</summary>
+<br />
+
+You can trigger Dependabot actions by commenting on this PR:
+- `@dependabot rebase` will rebase this PR
+- `@dependabot recreate` will recreate this PR, overwriting any edits
+that have been made to it
+- `@dependabot merge` will merge this PR after your CI passes on it
+- `@dependabot squash and merge` will squash and merge this PR after
+your CI passes on it
+- `@dependabot cancel merge` will cancel a previously requested merge
+and block automerging
+- `@dependabot reopen` will reopen this PR if it is closed
+- `@dependabot close` will close this PR and stop Dependabot recreating
+it. You can achieve the same result by closing it manually
+- `@dependabot show <dependency name> ignore conditions` will show all
+of the ignore conditions of the specified dependency
+- `@dependabot ignore this major version` will close this PR and stop
+Dependabot creating any more for this major version (unless you reopen
+the PR or upgrade to it yourself)
+- `@dependabot ignore this minor version` will close this PR and stop
+Dependabot creating any more for this minor version (unless you reopen
+the PR or upgrade to it yourself)
+- `@dependabot ignore this dependency` will close this PR and stop
+Dependabot creating any more for this dependency (unless you reopen the
+PR or upgrade to it yourself)
+
+
+</details>
+
+Signed-off-by: dependabot[bot] <support@github.com>
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ poetry.lock | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 2bf511e8a6..51e73bae54 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -3007,14 +3007,14 @@ files = [
+
+ [[package]]
+ name = "types-psycopg2"
+-version = "2.9.21.20250121"
++version = "2.9.21.20250318"
+ description = "Typing stubs for psycopg2"
+ optional = false
+ python-versions = ">=3.9"
+ groups = ["dev"]
+ files = [
+- {file = "types_psycopg2-2.9.21.20250121-py3-none-any.whl", hash = "sha256:b890dc6f5a08b6433f0ff73a4ec9a834deedad3e914f2a4a6fd43df021f745f1"},
+- {file = "types_psycopg2-2.9.21.20250121.tar.gz", hash = "sha256:2b0e2cd0f3747af1ae25a7027898716d80209604770ef3cbf350fe055b9c349b"},
++ {file = "types_psycopg2-2.9.21.20250318-py3-none-any.whl", hash = "sha256:7296d111ad950bbd2fc979a1ab0572acae69047f922280e77db657c00d2c79c0"},
++ {file = "types_psycopg2-2.9.21.20250318.tar.gz", hash = "sha256:eb6eac5bfb16adfd5f16b818918b9e26a40ede147e0f2bbffdf53a6ef7025a87"},
+ ]
+
+ [[package]]
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch b/packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch
new file mode 100644
index 0000000..4ca3c69
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0010-Bump-pyopenssl-from-24.3.0-to-25.0.0-18315.patch
@@ -0,0 +1,39 @@
+From 7346760aed018eaf46a0bff2d0459b39881d2af5 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:07:33 +0100
+Subject: [PATCH 10/74] Bump pyopenssl from 24.3.0 to 25.0.0 (#18315)
+
+---
+ poetry.lock | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 51e73bae54..c6a6ce9826 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -2053,18 +2053,19 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
+
+ [[package]]
+ name = "pyopenssl"
+-version = "24.3.0"
++version = "25.0.0"
+ description = "Python wrapper module around the OpenSSL library"
+ optional = false
+ python-versions = ">=3.7"
+ groups = ["main"]
+ files = [
+- {file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
+- {file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
++ {file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"},
++ {file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"},
+ ]
+
+ [package.dependencies]
+ cryptography = ">=41.0.5,<45"
++typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
+
+ [package.extras]
+ docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch b/packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch
new file mode 100644
index 0000000..57a0e9c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0011-Bump-types-jsonschema-from-4.23.0.20240813-to-4.23.0.patch
@@ -0,0 +1,35 @@
+From 75832f25b08a058d01acde334033f76edc131ad5 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:07:49 +0100
+Subject: [PATCH 11/74] Bump types-jsonschema from 4.23.0.20240813 to
+ 4.23.0.20241208 (#18305)
+
+---
+ poetry.lock | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index c6a6ce9826..abd97a785b 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -2957,14 +2957,14 @@ files = [
+
+ [[package]]
+ name = "types-jsonschema"
+-version = "4.23.0.20240813"
++version = "4.23.0.20241208"
+ description = "Typing stubs for jsonschema"
+ optional = false
+ python-versions = ">=3.8"
+ groups = ["dev"]
+ files = [
+- {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"},
+- {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"},
++ {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
++ {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
+ ]
+
+ [package.dependencies]
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch b/packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch
new file mode 100644
index 0000000..6206391
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0012-Bump-softprops-action-gh-release-from-1-to-2-18264.patch
@@ -0,0 +1,25 @@
+From 0384fd72eeaa77dd56b52f38f7b339b95babe8dd Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 29 Apr 2025 10:08:20 +0100
+Subject: [PATCH 12/74] Bump softprops/action-gh-release from 1 to 2 (#18264)
+
+---
+ .github/workflows/release-artifacts.yml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
+index e0b8f2faf4..e03c9d2bd5 100644
+--- a/.github/workflows/release-artifacts.yml
++++ b/.github/workflows/release-artifacts.yml
+@@ -213,7 +213,7 @@ jobs:
+ tar -cvJf debs.tar.xz debs
+ - name: Attach to release
+ # Pinned to work around https://github.com/softprops/action-gh-release/issues/445
+- uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
++ uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch b/packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch
new file mode 100644
index 0000000..fffe68d
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0013-Do-not-retry-push-during-backoff-period-18363.patch
@@ -0,0 +1,128 @@
+From e47de2b32de6183fd0cb91dda9b232de5d263345 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Tue, 29 Apr 2025 14:08:11 +0100
+Subject: [PATCH 13/74] Do not retry push during backoff period (#18363)
+
+This fixes a bug where if a pusher gets told about a new event to push
+it will ignore the backoff and immediately retry sending any pending
+push.
+---
+ changelog.d/18363.bugfix | 1 +
+ synapse/push/httppusher.py | 6 +++
+ tests/push/test_http.py | 78 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 85 insertions(+)
+ create mode 100644 changelog.d/18363.bugfix
+
+diff --git a/changelog.d/18363.bugfix b/changelog.d/18363.bugfix
+new file mode 100644
+index 0000000000..bfa336d52f
+--- /dev/null
++++ b/changelog.d/18363.bugfix
+@@ -0,0 +1 @@
++Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers.
+diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
+index 69790ecab5..7df8a128c9 100644
+--- a/synapse/push/httppusher.py
++++ b/synapse/push/httppusher.py
+@@ -205,6 +205,12 @@ class HttpPusher(Pusher):
+ if self._is_processing:
+ return
+
++ # Check if we are trying, but failing, to contact the pusher. If so, we
++ # don't try and start processing immediately and instead wait for the
++ # retry loop to try again later (which is controlled by the timer).
++ if self.failing_since and self.timed_call and self.timed_call.active():
++ return
++
+ run_as_background_process("httppush.process", self._process)
+
+ async def _process(self) -> None:
+diff --git a/tests/push/test_http.py b/tests/push/test_http.py
+index 5c235bbe53..b42fd284b6 100644
+--- a/tests/push/test_http.py
++++ b/tests/push/test_http.py
+@@ -1167,3 +1167,81 @@ class HTTPPusherTests(HomeserverTestCase):
+ self.assertEqual(
+ self.push_attempts[0][2]["notification"]["counts"]["unread"], 1
+ )
++
++ def test_push_backoff(self) -> None:
++ """
++ The HTTP pusher will backoff correctly if it fails to contact the pusher.
++ """
++
++ # Register the user who gets notified
++ user_id = self.register_user("user", "pass")
++ access_token = self.login("user", "pass")
++
++ # Register the user who sends the message
++ other_user_id = self.register_user("otheruser", "pass")
++ other_access_token = self.login("otheruser", "pass")
++
++ # Register the pusher
++ user_tuple = self.get_success(
++ self.hs.get_datastores().main.get_user_by_access_token(access_token)
++ )
++ assert user_tuple is not None
++ device_id = user_tuple.device_id
++
++ self.get_success(
++ self.hs.get_pusherpool().add_or_update_pusher(
++ user_id=user_id,
++ device_id=device_id,
++ kind="http",
++ app_id="m.http",
++ app_display_name="HTTP Push Notifications",
++ device_display_name="pushy push",
++ pushkey="a@example.com",
++ lang=None,
++ data={"url": "http://example.com/_matrix/push/v1/notify"},
++ )
++ )
++
++ # Create a room with the other user
++ room = self.helper.create_room_as(user_id, tok=access_token)
++ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
++
++ # The other user sends some messages
++ self.helper.send(room, body="Message 1", tok=other_access_token)
++
++ # One push was attempted to be sent
++ self.assertEqual(len(self.push_attempts), 1)
++ self.assertEqual(
++ self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify"
++ )
++ self.assertEqual(
++ self.push_attempts[0][2]["notification"]["content"]["body"], "Message 1"
++ )
++ self.push_attempts[0][0].callback({})
++ self.pump()
++
++ # Send another message, this time it fails
++ self.helper.send(room, body="Message 2", tok=other_access_token)
++ self.assertEqual(len(self.push_attempts), 2)
++ self.push_attempts[1][0].errback(Exception("couldn't connect"))
++ self.pump()
++
++ # Sending yet another message doesn't trigger a push immediately
++ self.helper.send(room, body="Message 3", tok=other_access_token)
++ self.pump()
++ self.assertEqual(len(self.push_attempts), 2)
++
++ # .. but waiting for a bit will cause more pushes
++ self.reactor.advance(10)
++ self.assertEqual(len(self.push_attempts), 3)
++ self.assertEqual(
++ self.push_attempts[2][2]["notification"]["content"]["body"], "Message 2"
++ )
++ self.push_attempts[2][0].callback({})
++ self.pump()
++
++ self.assertEqual(len(self.push_attempts), 4)
++ self.assertEqual(
++ self.push_attempts[3][2]["notification"]["content"]["body"], "Message 3"
++ )
++ self.push_attempts[3][0].callback({})
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch b/packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch
new file mode 100644
index 0000000..e6c87b2
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0014-Slight-performance-increase-when-using-the-ratelimit.patch
@@ -0,0 +1,123 @@
+From ad140130cc3db503de3fd15aa2923417f46b700b Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Tue, 29 Apr 2025 14:08:22 +0100
+Subject: [PATCH 14/74] Slight performance increase when using the ratelimiter
+ (#18369)
+
+See the commits.
+---
+ changelog.d/18369.misc | 1 +
+ synapse/api/ratelimiting.py | 19 ++++++++-----------
+ synapse/rest/client/sync.py | 7 +++----
+ tests/api/test_ratelimiting.py | 4 +---
+ 4 files changed, 13 insertions(+), 18 deletions(-)
+ create mode 100644 changelog.d/18369.misc
+
+diff --git a/changelog.d/18369.misc b/changelog.d/18369.misc
+new file mode 100644
+index 0000000000..f4c0e5f006
+--- /dev/null
++++ b/changelog.d/18369.misc
+@@ -0,0 +1 @@
++Slight performance increase when using the ratelimiter.
+diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
+index 229329a5ae..8665b3b765 100644
+--- a/synapse/api/ratelimiting.py
++++ b/synapse/api/ratelimiting.py
+@@ -20,8 +20,7 @@
+ #
+ #
+
+-from collections import OrderedDict
+-from typing import Hashable, Optional, Tuple
++from typing import Dict, Hashable, Optional, Tuple
+
+ from synapse.api.errors import LimitExceededError
+ from synapse.config.ratelimiting import RatelimitSettings
+@@ -80,12 +79,14 @@ class Ratelimiter:
+ self.store = store
+ self._limiter_name = cfg.key
+
+- # An ordered dictionary representing the token buckets tracked by this rate
++ # A dictionary representing the token buckets tracked by this rate
+ # limiter. Each entry maps a key of arbitrary type to a tuple representing:
+ # * The number of tokens currently in the bucket,
+ # * The time point when the bucket was last completely empty, and
+ # * The rate_hz (leak rate) of this particular bucket.
+- self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()
++ self.actions: Dict[Hashable, Tuple[float, float, float]] = {}
++
++ self.clock.looping_call(self._prune_message_counts, 60 * 1000)
+
+ def _get_key(
+ self, requester: Optional[Requester], key: Optional[Hashable]
+@@ -169,9 +170,6 @@ class Ratelimiter:
+ rate_hz = rate_hz if rate_hz is not None else self.rate_hz
+ burst_count = burst_count if burst_count is not None else self.burst_count
+
+- # Remove any expired entries
+- self._prune_message_counts(time_now_s)
+-
+ # Check if there is an existing count entry for this key
+ action_count, time_start, _ = self._get_action_counts(key, time_now_s)
+
+@@ -246,13 +244,12 @@ class Ratelimiter:
+ action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s)
+ self.actions[key] = (action_count + n_actions, time_start, rate_hz)
+
+- def _prune_message_counts(self, time_now_s: float) -> None:
++ def _prune_message_counts(self) -> None:
+ """Remove message count entries that have not exceeded their defined
+ rate_hz limit
+-
+- Args:
+- time_now_s: The current time
+ """
++ time_now_s = self.clock.time()
++
+ # We create a copy of the key list here as the dictionary is modified during
+ # the loop
+ for key in list(self.actions.keys()):
+diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
+index 4fb9c0c8e7..bac02122d0 100644
+--- a/synapse/rest/client/sync.py
++++ b/synapse/rest/client/sync.py
+@@ -24,7 +24,7 @@ from collections import defaultdict
+ from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
+
+ from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
+-from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError
++from synapse.api.errors import Codes, StoreError, SynapseError
+ from synapse.api.filtering import FilterCollection
+ from synapse.api.presence import UserPresenceState
+ from synapse.api.ratelimiting import Ratelimiter
+@@ -248,9 +248,8 @@ class SyncRestServlet(RestServlet):
+ await self._server_notices_sender.on_user_syncing(user.to_string())
+
+ # ignore the presence update if the ratelimit is exceeded but do not pause the request
+- try:
+- await self._presence_per_user_limiter.ratelimit(requester, pause=0.0)
+- except LimitExceededError:
++ allowed, _ = await self._presence_per_user_limiter.can_do_action(requester)
++ if not allowed:
+ affect_presence = False
+ logger.debug("User set_presence ratelimit exceeded; ignoring it.")
+ else:
+diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
+index a59e168db1..1a1cbde74e 100644
+--- a/tests/api/test_ratelimiting.py
++++ b/tests/api/test_ratelimiting.py
+@@ -220,9 +220,7 @@ class TestRatelimiter(unittest.HomeserverTestCase):
+
+ self.assertIn("test_id_1", limiter.actions)
+
+- self.get_success_or_raise(
+- limiter.can_do_action(None, key="test_id_2", _time_now_s=10)
+- )
++ self.reactor.advance(60)
+
+ self.assertNotIn("test_id_1", limiter.actions)
+
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch b/packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch
new file mode 100644
index 0000000..1e2bdec
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0015-Minor-performance-improvements-to-notifier-replicati.patch
@@ -0,0 +1,116 @@
+From 4eaab31757f096a04f4278d722cdef1eb92a1743 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Tue, 29 Apr 2025 14:08:32 +0100
+Subject: [PATCH 15/74] Minor performance improvements to notifier/replication
+ (#18367)
+
+These are some improvements to `on_new_event` which is a hot path. Not
+sure how much this will save, but maybe like ~5%?
+
+Possibly easier to review commit-by-commit
+---
+ changelog.d/18367.misc | 1 +
+ synapse/notifier.py | 61 +++++++++++++++++++++---------------------
+ 2 files changed, 32 insertions(+), 30 deletions(-)
+ create mode 100644 changelog.d/18367.misc
+
+diff --git a/changelog.d/18367.misc b/changelog.d/18367.misc
+new file mode 100644
+index 0000000000..2e8b897fa6
+--- /dev/null
++++ b/changelog.d/18367.misc
+@@ -0,0 +1 @@
++Minor performance improvements to the notifier.
+diff --git a/synapse/notifier.py b/synapse/notifier.py
+index 88f531182a..1914d0c914 100644
+--- a/synapse/notifier.py
++++ b/synapse/notifier.py
+@@ -66,7 +66,6 @@ from synapse.types import (
+ from synapse.util.async_helpers import (
+ timeout_deferred,
+ )
+-from synapse.util.metrics import Measure
+ from synapse.util.stringutils import shortstr
+ from synapse.visibility import filter_events_for_client
+
+@@ -520,20 +519,22 @@ class Notifier:
+ users = users or []
+ rooms = rooms or []
+
+- with Measure(self.clock, "on_new_event"):
+- user_streams: Set[_NotifierUserStream] = set()
+-
+- log_kv(
+- {
+- "waking_up_explicit_users": len(users),
+- "waking_up_explicit_rooms": len(rooms),
+- "users": shortstr(users),
+- "rooms": shortstr(rooms),
+- "stream": stream_key,
+- "stream_id": new_token,
+- }
+- )
++ user_streams: Set[_NotifierUserStream] = set()
++
++ log_kv(
++ {
++ "waking_up_explicit_users": len(users),
++ "waking_up_explicit_rooms": len(rooms),
++ "users": shortstr(users),
++ "rooms": shortstr(rooms),
++ "stream": stream_key,
++ "stream_id": new_token,
++ }
++ )
+
++ # Only calculate which user streams to wake up if there are, in fact,
++ # any user streams registered.
++ if self.user_to_user_stream or self.room_to_user_streams:
+ for user in users:
+ user_stream = self.user_to_user_stream.get(str(user))
+ if user_stream is not None:
+@@ -565,25 +566,25 @@ class Notifier:
+ # We resolve all these deferreds in one go so that we only need to
+ # call `PreserveLoggingContext` once, as it has a bunch of overhead
+ # (to calculate performance stats)
+- with PreserveLoggingContext():
+- for listener in listeners:
+- listener.callback(current_token)
++ if listeners:
++ with PreserveLoggingContext():
++ for listener in listeners:
++ listener.callback(current_token)
+
+- users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
++ if user_streams:
++ users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
+
+- self.notify_replication()
++ self.notify_replication()
+
+- # Notify appservices.
+- try:
+- self.appservice_handler.notify_interested_services_ephemeral(
+- stream_key,
+- new_token,
+- users,
+- )
+- except Exception:
+- logger.exception(
+- "Error notifying application services of ephemeral events"
+- )
++ # Notify appservices.
++ try:
++ self.appservice_handler.notify_interested_services_ephemeral(
++ stream_key,
++ new_token,
++ users,
++ )
++ except Exception:
++ logger.exception("Error notifying application services of ephemeral events")
+
+ def on_new_replication_data(self) -> None:
+ """Used to inform replication listeners that something has happened
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch b/packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch
new file mode 100644
index 0000000..c92436a
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0016-Fix-typo-in-docs-about-push-18320.patch
@@ -0,0 +1,34 @@
+From f79811ed80bebaa5b187637af6d16d413b07166e Mon Sep 17 00:00:00 2001
+From: Kim Brose <2803622+HarHarLinks@users.noreply.github.com>
+Date: Wed, 30 Apr 2025 15:27:08 +0200
+Subject: [PATCH 16/74] Fix typo in docs about `push` (#18320)
+
+---
+ changelog.d/18320.doc | 1 +
+ docs/usage/configuration/config_documentation.md | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18320.doc
+
+diff --git a/changelog.d/18320.doc b/changelog.d/18320.doc
+new file mode 100644
+index 0000000000..d84c279940
+--- /dev/null
++++ b/changelog.d/18320.doc
+@@ -0,0 +1 @@
++Fix typo in docs about the `push` config option. Contributed by @HarHarLinks.
+diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
+index 73fd9622ce..19dc9dd356 100644
+--- a/docs/usage/configuration/config_documentation.md
++++ b/docs/usage/configuration/config_documentation.md
+@@ -4018,7 +4018,7 @@ This option has a number of sub-options. They are as follows:
+ * `include_content`: Clients requesting push notifications can either have the body of
+ the message sent in the notification poke along with other details
+ like the sender, or just the event ID and room ID (`event_id_only`).
+- If clients choose the to have the body sent, this option controls whether the
++ If clients choose to have the body sent, this option controls whether the
+ notification request includes the content of the event (other details
+ like the sender are still included). If `event_id_only` is enabled, it
+ has no effect.
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch b/packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch
new file mode 100644
index 0000000..6a0c1a3
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0017-Optimize-Dockerfile-workers-18292.patch
@@ -0,0 +1,138 @@
+From 4097ada89fefe12e7ec6d2b7a3bfbc61e64e14a0 Mon Sep 17 00:00:00 2001
+From: Andrew Ferrazzutti <andrewf@element.io>
+Date: Wed, 30 Apr 2025 09:54:30 -0400
+Subject: [PATCH 17/74] Optimize `Dockerfile-workers` (#18292)
+
+- Use a `uv:python` image for the first build layer, to reduce the
+number of intermediate images required, as the
+main Dockerfile uses that image already
+- Use a cache mount for `apt` commands
+- Skip a pointless install of `redis-server`, since the redis Docker
+image is copied from instead
+- Move some RUN steps out of the final image layer & into the build
+layer
+
+Depends on https://github.com/element-hq/synapse/pull/18275
+
+### Pull Request Checklist
+
+<!-- Please read
+https://element-hq.github.io/synapse/latest/development/contributing_guide.html
+before submitting your pull request -->
+
+* [x] Pull request is based on the develop branch
+* [x] Pull request includes a [changelog
+file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
+The entry should:
+- Be a short description of your change which makes sense to users.
+"Fixed a bug that prevented receiving messages from other servers."
+instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
+ - Use markdown where necessary, mostly for `code blocks`.
+ - End with either a period (.) or an exclamation mark (!).
+ - Start with a capital letter.
+- Feel free to credit yourself, by adding a sentence "Contributed by
+@github_username." or "Contributed by [Your Name]." to the end of the
+entry.
+* [x] [Code
+style](https://element-hq.github.io/synapse/latest/code_style.html) is
+correct
+(run the
+[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
+---
+ changelog.d/18292.docker | 1 +
+ docker/Dockerfile-workers | 50 +++++++++++++++++++++------------------
+ 2 files changed, 28 insertions(+), 23 deletions(-)
+ create mode 100644 changelog.d/18292.docker
+
+diff --git a/changelog.d/18292.docker b/changelog.d/18292.docker
+new file mode 100644
+index 0000000000..cdb95b369b
+--- /dev/null
++++ b/changelog.d/18292.docker
+@@ -0,0 +1 @@
++Optimize the build of the workers image.
+diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
+index dd0bf59994..a7f576184d 100644
+--- a/docker/Dockerfile-workers
++++ b/docker/Dockerfile-workers
+@@ -3,18 +3,37 @@
+ ARG SYNAPSE_VERSION=latest
+ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
+ ARG DEBIAN_VERSION=bookworm
++ARG PYTHON_VERSION=3.12
+
+-# first of all, we create a base image with an nginx which we can copy into the
++# first of all, we create a base image with dependencies which we can copy into the
+ # target image. For repeated rebuilds, this is much faster than apt installing
+ # each time.
+
+-FROM docker.io/library/debian:${DEBIAN_VERSION}-slim AS deps_base
++FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
++
++ # Tell apt to keep downloaded package files, as we're using cache mounts.
++ RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
++
+ RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update -qq && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
+- redis-server nginx-light
++ nginx-light
++
++ RUN \
++ # remove default page
++ rm /etc/nginx/sites-enabled/default && \
++ # have nginx log to stderr/out
++ ln -sf /dev/stdout /var/log/nginx/access.log && \
++ ln -sf /dev/stderr /var/log/nginx/error.log
++
++ # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
++ # (mounted as --mount=type=cache) and the target directory.
++ RUN --mount=type=cache,target=/root/.cache/uv \
++ uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2
++
++ RUN mkdir -p /uv/etc/supervisor/conf.d
+
+ # Similarly, a base to copy the redis server from.
+ #
+@@ -27,31 +46,16 @@ FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base
+ # now build the final image, based on the the regular Synapse docker image
+ FROM $FROM
+
+- # Install supervisord with uv pip instead of apt, to avoid installing a second
+- # copy of python.
+- # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
+- # (mounted as --mount=type=cache) and the target directory.
+- RUN \
+- --mount=type=bind,from=ghcr.io/astral-sh/uv:0.6.8,source=/uv,target=/uv \
+- --mount=type=cache,target=/root/.cache/uv \
+- /uv pip install --link-mode=copy --prefix="/usr/local" supervisor~=4.2
+-
+- RUN mkdir -p /etc/supervisor/conf.d
+-
+- # Copy over redis and nginx
++ # Copy over dependencies
+ COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
+-
++ COPY --from=deps_base /uv /
+ COPY --from=deps_base /usr/sbin/nginx /usr/sbin
+ COPY --from=deps_base /usr/share/nginx /usr/share/nginx
+ COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
+ COPY --from=deps_base /etc/nginx /etc/nginx
+- RUN rm /etc/nginx/sites-enabled/default
+- RUN mkdir /var/log/nginx /var/lib/nginx
+- RUN chown www-data /var/lib/nginx
+-
+- # have nginx log to stderr/out
+- RUN ln -sf /dev/stdout /var/log/nginx/access.log
+- RUN ln -sf /dev/stderr /var/log/nginx/error.log
++ COPY --from=deps_base /var/log/nginx /var/log/nginx
++ # chown to allow non-root user to write to http-*-temp-path dirs
++ COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx
+
+ # Copy Synapse worker, nginx and supervisord configuration template files
+ COPY ./docker/conf-workers/* /conf/
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch b/packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch
new file mode 100644
index 0000000..077f5d0
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0018-configure_workers_and_start.py-unify-python-path-182.patch
@@ -0,0 +1,73 @@
+From 7563b2a2a316a7b249ef847ddbf5b63064eb1cc2 Mon Sep 17 00:00:00 2001
+From: Andrew Ferrazzutti <andrewf@element.io>
+Date: Wed, 30 Apr 2025 10:22:09 -0400
+Subject: [PATCH 18/74] configure_workers_and_start.py: unify python path
+ (#18291)
+
+Use absolute path for python in script shebang, and invoke child python
+processes with sys.executable. This is consistent with the absolute path
+used to invoke python elsewhere (like in the supervisor config).
+
+### Pull Request Checklist
+
+<!-- Please read
+https://element-hq.github.io/synapse/latest/development/contributing_guide.html
+before submitting your pull request -->
+
+* [x] Pull request is based on the develop branch
+* [x] Pull request includes a [changelog
+file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
+The entry should:
+- Be a short description of your change which makes sense to users.
+"Fixed a bug that prevented receiving messages from other servers."
+instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
+ - Use markdown where necessary, mostly for `code blocks`.
+ - End with either a period (.) or an exclamation mark (!).
+ - Start with a capital letter.
+- Feel free to credit yourself, by adding a sentence "Contributed by
+@github_username." or "Contributed by [Your Name]." to the end of the
+entry.
+* [x] [Code
+style](https://element-hq.github.io/synapse/latest/code_style.html) is
+correct
+(run the
+[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
+
+---------
+
+Co-authored-by: Quentin Gliech <quenting@element.io>
+---
+ changelog.d/18291.docker | 1 +
+ docker/configure_workers_and_start.py | 4 ++--
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+ create mode 100644 changelog.d/18291.docker
+
+diff --git a/changelog.d/18291.docker b/changelog.d/18291.docker
+new file mode 100644
+index 0000000000..b94c0e80e3
+--- /dev/null
++++ b/changelog.d/18291.docker
+@@ -0,0 +1 @@
++In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`.
+diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
+index 6d73e8feaa..ff5cff3221 100755
+--- a/docker/configure_workers_and_start.py
++++ b/docker/configure_workers_and_start.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/local/bin/python
+ #
+ # This file is licensed under the Affero General Public License (AGPL) version 3.
+ #
+@@ -604,7 +604,7 @@ def generate_base_homeserver_config() -> None:
+ # start.py already does this for us, so just call that.
+ # note that this script is copied in in the official, monolith dockerfile
+ os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
+- subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
++ subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
+
+
+ def parse_worker_types(
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch b/packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch
new file mode 100644
index 0000000..a86d43e
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0019-docker-use-shebangs-to-invoke-generated-scripts-1829.patch
@@ -0,0 +1,100 @@
+From 5ab05e7b95a687967fe99be33cb33a9c62fee34b Mon Sep 17 00:00:00 2001
+From: Andrew Ferrazzutti <andrewf@element.io>
+Date: Wed, 30 Apr 2025 10:26:08 -0400
+Subject: [PATCH 19/74] docker: use shebangs to invoke generated scripts
+ (#18295)
+
+When generating scripts from templates, don't add a leading newline so
+that their shebangs may be handled correctly.
+
+### Pull Request Checklist
+
+<!-- Please read
+https://element-hq.github.io/synapse/latest/development/contributing_guide.html
+before submitting your pull request -->
+
+* [x] Pull request is based on the develop branch
+* [x] Pull request includes a [changelog
+file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
+The entry should:
+- Be a short description of your change which makes sense to users.
+"Fixed a bug that prevented receiving messages from other servers."
+instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
+ - Use markdown where necessary, mostly for `code blocks`.
+ - End with either a period (.) or an exclamation mark (!).
+ - Start with a capital letter.
+- Feel free to credit yourself, by adding a sentence "Contributed by
+@github_username." or "Contributed by [Your Name]." to the end of the
+entry.
+* [x] [Code
+style](https://element-hq.github.io/synapse/latest/code_style.html) is
+correct
+(run the
+[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
+
+---------
+
+Co-authored-by: Quentin Gliech <quenting@element.io>
+---
+ changelog.d/18295.docker | 1 +
+ docker/Dockerfile-workers | 2 +-
+ docker/complement/Dockerfile | 2 +-
+ docker/configure_workers_and_start.py | 5 ++++-
+ 4 files changed, 7 insertions(+), 3 deletions(-)
+ create mode 100644 changelog.d/18295.docker
+
+diff --git a/changelog.d/18295.docker b/changelog.d/18295.docker
+new file mode 100644
+index 0000000000..239def1f54
+--- /dev/null
++++ b/changelog.d/18295.docker
+@@ -0,0 +1 @@
++When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly.
+diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
+index a7f576184d..6d0fc1440b 100644
+--- a/docker/Dockerfile-workers
++++ b/docker/Dockerfile-workers
+@@ -74,4 +74,4 @@ FROM $FROM
+ # Replace the healthcheck with one which checks *all* the workers. The script
+ # is generated by configure_workers_and_start.py.
+ HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+- CMD /bin/sh /healthcheck.sh
++ CMD ["/healthcheck.sh"]
+diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
+index dd029c5fbc..6ed084fe5d 100644
+--- a/docker/complement/Dockerfile
++++ b/docker/complement/Dockerfile
+@@ -58,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"]
+
+ # Update the healthcheck to have a shorter check interval
+ HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
+- CMD /bin/sh /healthcheck.sh
++ CMD ["/healthcheck.sh"]
+diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
+index ff5cff3221..8f96e57e50 100755
+--- a/docker/configure_workers_and_start.py
++++ b/docker/configure_workers_and_start.py
+@@ -376,9 +376,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
+ #
+ # We use append mode in case the files have already been written to by something else
+ # (for instance, as part of the instructions in a dockerfile).
++ exists = os.path.isfile(dst)
+ with open(dst, "a") as outfile:
+ # In case the existing file doesn't end with a newline
+- outfile.write("\n")
++ if exists:
++ outfile.write("\n")
+
+ outfile.write(rendered)
+
+@@ -998,6 +1000,7 @@ def generate_worker_files(
+ "/healthcheck.sh",
+ healthcheck_urls=healthcheck_urls,
+ )
++ os.chmod("/healthcheck.sh", 0o755)
+
+ # Ensure the logging directory exists
+ log_dir = data_dir + "/logs"
+--
+2.49.0
+
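The crux of the patch above is the guard in `convert()`: only write a separating newline when the destination file already exists, so a freshly generated script keeps its `#!` line at the very top (the kernel only honours a shebang on line 1). A minimal standalone sketch of that logic, with the template-rendering step reduced to a plain string argument:

```python
import os

def write_rendered(dst: str, rendered: str) -> None:
    """Append rendered template output to dst without breaking its shebang."""
    exists = os.path.isfile(dst)
    # Append mode, in case something else (e.g. a Dockerfile instruction)
    # has already written to the file.
    with open(dst, "a") as outfile:
        # Only separate from pre-existing content; a leading newline in a
        # brand-new script would push "#!/bin/sh" off the first line.
        if exists:
            outfile.write("\n")
        outfile.write(rendered)
    # Exec-form healthchecks (CMD ["/healthcheck.sh"]) invoke the script
    # directly, so it must be executable; hence the added chmod.
    os.chmod(dst, 0o755)
```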
diff --git a/packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch b/packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch
new file mode 100644
index 0000000..1f45cdc
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0020-start_for_complement.sh-use-more-shell-builtins-1829.patch
@@ -0,0 +1,91 @@
+From 7be6c711d4a57f990003613c0b9715e3ac1502cb Mon Sep 17 00:00:00 2001
+From: Andrew Ferrazzutti <andrewf@element.io>
+Date: Wed, 30 Apr 2025 11:53:15 -0400
+Subject: [PATCH 20/74] start_for_complement.sh: use more shell builtins
+ (#18293)
+
+Avoid calling external tools when shell builtins suffice.
+
+---------
+
+Co-authored-by: Quentin Gliech <quenting@element.io>
+---
+ changelog.d/18293.docker | 1 +
+ docker/complement/conf/start_for_complement.sh | 11 +++++------
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+ create mode 100644 changelog.d/18293.docker
+
+diff --git a/changelog.d/18293.docker b/changelog.d/18293.docker
+new file mode 100644
+index 0000000000..df47a68bfe
+--- /dev/null
++++ b/changelog.d/18293.docker
+@@ -0,0 +1 @@
++In start_for_complement.sh, replace some external program calls with shell builtins.
+diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
+index 59b30e2051..a5e06396e2 100755
+--- a/docker/complement/conf/start_for_complement.sh
++++ b/docker/complement/conf/start_for_complement.sh
+@@ -9,7 +9,7 @@ echo " Args: $*"
+ echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
+
+ function log {
+- d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
++ d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ })
+ echo "$d $*"
+ }
+
+@@ -103,12 +103,11 @@ fi
+ # Note that both the key and certificate are in PEM format (not DER).
+
+ # First generate a configuration file to set up a Subject Alternative Name.
+-cat > /conf/server.tls.conf <<EOF
++echo "\
+ .include /etc/ssl/openssl.cnf
+
+ [SAN]
+-subjectAltName=DNS:${SERVER_NAME}
+-EOF
++subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
+
+ # Generate an RSA key
+ openssl genrsa -out /conf/server.tls.key 2048
+@@ -123,8 +122,8 @@ openssl x509 -req -in /conf/server.tls.csr \
+ -out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
+
+ # Assert that we have a Subject Alternative Name in the certificate.
+-# (grep will exit with 1 here if there isn't a SAN in the certificate.)
+-openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
++# (the test will exit with 1 here if there isn't a SAN in the certificate.)
++[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]]
+
+ export SYNAPSE_TLS_CERT=/conf/server.tls.crt
+ export SYNAPSE_TLS_KEY=/conf/server.tls.key
+--
+2.49.0
+
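As a point of reference, the millisecond timestamp that the patched `log` function assembles from `$EPOCHREALTIME` and `printf '%(...)T'` is equivalent to the following Python (an illustrative translation, not part of the patch):

```python
import time

def log(*args: str) -> None:
    # Build a "YYYY-MM-DD HH:MM:SS,mmm" prefix without spawning an external
    # `date` process, mirroring what the bash builtins achieve.
    now = time.time()
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
    print(f"{stamp},{int(now * 1000) % 1000:03d}", *args)

log("Starting Complement run")
```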
diff --git a/packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch b/packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch
new file mode 100644
index 0000000..6816c77
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0021-Added-Pocket-ID-to-openid.md-18237.patch
@@ -0,0 +1,67 @@
+From d59bbd8b6b342d41641fddf99035d38e3939f18c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Martin=20Lav=C3=A9n?= <laven.martin@gmail.com>
+Date: Wed, 30 Apr 2025 18:13:09 +0200
+Subject: [PATCH 21/74] Added Pocket ID to openid.md (#18237)
+
+---
+ changelog.d/18237.doc | 1 +
+ docs/openid.md | 27 +++++++++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+ create mode 100644 changelog.d/18237.doc
+
+diff --git a/changelog.d/18237.doc b/changelog.d/18237.doc
+new file mode 100644
+index 0000000000..872f7cab7d
+--- /dev/null
++++ b/changelog.d/18237.doc
+@@ -0,0 +1 @@
++Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider.
+\ No newline at end of file
+diff --git a/docs/openid.md b/docs/openid.md
+index 5a3d7e9fba..f86ba189c7 100644
+--- a/docs/openid.md
++++ b/docs/openid.md
+@@ -23,6 +23,7 @@ such as [Github][github-idp].
+ [auth0]: https://auth0.com/
+ [authentik]: https://goauthentik.io/
+ [lemonldap]: https://lemonldap-ng.org/
++[pocket-id]: https://pocket-id.org/
+ [okta]: https://www.okta.com/
+ [dex-idp]: https://github.com/dexidp/dex
+ [keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
+@@ -624,6 +625,32 @@ oidc_providers:
+
+ Note that the fields `client_id` and `client_secret` are taken from the CURL response above.
+
++### Pocket ID
++
++[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys.
++1. Go to `OIDC Clients`
++2. Click on `Add OIDC Client`
++3. Add a name, for example `Synapse`
++4. Add `https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` (replace `auth.example.org` with your domain)
++5. Click on `Save`
++6. Note down your `Client ID` and `Client secret`; these will be used later
++
++Synapse config:
++
++```yaml
++oidc_providers:
++ - idp_id: pocket_id
++ idp_name: Pocket ID
++ issuer: "https://auth.example.org/" # Replace with your domain
++ client_id: "your-client-id" # Replace with the "Client ID" you noted down before
++ client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before
++ scopes: ["openid", "profile"]
++ user_mapping_provider:
++ config:
++ localpart_template: "{{ user.preferred_username }}"
++ display_name_template: "{{ user.name }}"
++```
++
+ ### Shibboleth with OIDC Plugin
+
+ [Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities.
+--
+2.49.0
+
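Note that Synapse resolves the provider's endpoints from the `issuer` value via OpenID Connect Discovery, so the issuer configured above must exactly match what Pocket ID advertises, trailing slash included. A small sketch of the standard well-known URL derivation (generic OIDC Discovery 1.0 behaviour, not specific to Pocket ID):

```python
def discovery_url(issuer: str) -> str:
    # Per OpenID Connect Discovery 1.0, provider metadata is served from
    # <issuer>/.well-known/openid-configuration.
    return issuer.rstrip("/") + "/.well-known/openid-configuration"

assert (
    discovery_url("https://auth.example.org/")
    == "https://auth.example.org/.well-known/openid-configuration"
)
```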
diff --git a/packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch b/packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch
new file mode 100644
index 0000000..828f433
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0022-docs-workers.md-Add-_matrix-federation-v1-event-to-l.patch
@@ -0,0 +1,69 @@
+From 2965c9970c0b2742885dc345f6d70df7d5686423 Mon Sep 17 00:00:00 2001
+From: Sebastian Spaeth <Sebastian@SSpaeth.de>
+Date: Thu, 1 May 2025 16:11:59 +0200
+Subject: [PATCH 22/74] docs/workers.md: Add ^/_matrix/federation/v1/event/ to
+ list of delegatable endpoints (#18377)
+
+---
+ changelog.d/18377.doc | 1 +
+ docker/configure_workers_and_start.py | 1 +
+ docs/upgrade.md | 10 ++++++++++
+ docs/workers.md | 1 +
+ 4 files changed, 13 insertions(+)
+ create mode 100644 changelog.d/18377.doc
+
+diff --git a/changelog.d/18377.doc b/changelog.d/18377.doc
+new file mode 100644
+index 0000000000..ceb2b64e5d
+--- /dev/null
++++ b/changelog.d/18377.doc
+@@ -0,0 +1 @@
++Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers.
+diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
+index 8f96e57e50..df34d51f77 100755
+--- a/docker/configure_workers_and_start.py
++++ b/docker/configure_workers_and_start.py
+@@ -202,6 +202,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["federation"],
+ "endpoint_patterns": [
++ "^/_matrix/federation/v1/version$",
+ "^/_matrix/federation/(v1|v2)/event/",
+ "^/_matrix/federation/(v1|v2)/state/",
+ "^/_matrix/federation/(v1|v2)/state_ids/",
+diff --git a/docs/upgrade.md b/docs/upgrade.md
+index 07a9641fdd..d508e2231e 100644
+--- a/docs/upgrade.md
++++ b/docs/upgrade.md
+@@ -117,6 +117,16 @@ each upgrade are complete before moving on to the next upgrade, to avoid
+ stacking them up. You can monitor the currently running background updates with
+ [the Admin API](usage/administration/admin_api/background_updates.html#status).
+
++# Upgrading to v1.130.0
++
++## Documented endpoint which can be delegated to a federation worker
++
++The endpoint `^/_matrix/federation/v1/version$` can be delegated to a federation
++worker. This is not new behaviour, but had not been documented yet. The
++[list of delegatable endpoints](workers.md#synapseappgeneric_worker) has
++been updated to include it. Make sure to check your reverse proxy rules if you
++are using workers.
++
+ # Upgrading to v1.126.0
+
+ ## Room list publication rules change
+diff --git a/docs/workers.md b/docs/workers.md
+index 9ebcc886b1..2597e78217 100644
+--- a/docs/workers.md
++++ b/docs/workers.md
+@@ -200,6 +200,7 @@ information.
+ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$
+
+ # Federation requests
++ ^/_matrix/federation/v1/version$
+ ^/_matrix/federation/v1/event/
+ ^/_matrix/federation/v1/state/
+ ^/_matrix/federation/v1/state_ids/
+--
+2.49.0
+
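One detail worth noting in the hunks above: the new pattern is anchored with `$`, unlike the prefix patterns beneath it, because `/_matrix/federation/v1/version` takes no trailing path component. A quick self-check of the routing regexes:

```python
import re

patterns = [
    "^/_matrix/federation/v1/version$",
    "^/_matrix/federation/(v1|v2)/event/",
]

def routable(path: str) -> bool:
    return any(re.match(p, path) for p in patterns)

assert routable("/_matrix/federation/v1/version")
assert routable("/_matrix/federation/v2/event/$someEventId")
assert not routable("/_matrix/federation/v1/version/extra")  # rejected by $
```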
diff --git a/packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch b/packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch
new file mode 100644
index 0000000..c874ee0
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0023-Add-an-Admin-API-endpoint-to-fetch-scheduled-tasks-1.patch
@@ -0,0 +1,383 @@
+From 6dc1ecd35972c95ce62c5e0563245845c9c64e49 Mon Sep 17 00:00:00 2001
+From: Shay <hillerys@element.io>
+Date: Thu, 1 May 2025 11:30:00 -0700
+Subject: [PATCH 23/74] Add an Admin API endpoint to fetch scheduled tasks
+ (#18214)
+
+---
+ changelog.d/18214.feature | 1 +
+ docs/admin_api/scheduled_tasks.md | 54 +++++++
+ synapse/rest/admin/__init__.py | 2 +
+ synapse/rest/admin/scheduled_tasks.py | 70 +++++++++
+ tests/rest/admin/test_scheduled_tasks.py | 192 +++++++++++++++++++++++
+ 5 files changed, 319 insertions(+)
+ create mode 100644 changelog.d/18214.feature
+ create mode 100644 docs/admin_api/scheduled_tasks.md
+ create mode 100644 synapse/rest/admin/scheduled_tasks.py
+ create mode 100644 tests/rest/admin/test_scheduled_tasks.py
+
+diff --git a/changelog.d/18214.feature b/changelog.d/18214.feature
+new file mode 100644
+index 0000000000..751cb7d383
+--- /dev/null
++++ b/changelog.d/18214.feature
+@@ -0,0 +1 @@
++Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks.
+\ No newline at end of file
+diff --git a/docs/admin_api/scheduled_tasks.md b/docs/admin_api/scheduled_tasks.md
+new file mode 100644
+index 0000000000..1708871a6d
+--- /dev/null
++++ b/docs/admin_api/scheduled_tasks.md
+@@ -0,0 +1,54 @@
++# Show scheduled tasks
++
++This API returns information about scheduled tasks.
++
++To use it, you will need to authenticate by providing an `access_token`
++for a server admin: see [Admin API](../usage/administration/admin_api/).
++
++The API is:
++```
++GET /_synapse/admin/v1/scheduled_tasks
++```
++
++It returns a JSON body like the following:
++
++```json
++{
++ "scheduled_tasks": [
++ {
++ "id": "GSA124oegf1",
++ "action": "shutdown_room",
++ "status": "complete",
++ "timestamp": 23423523,
++ "resource_id": "!roomid",
++ "result": "some result",
++ "error": null
++ }
++ ]
++}
++```
++
++**Query parameters:**
++
++* `action_name`: string - Optional. Returns only the scheduled tasks with the given action name.
++* `resource_id`: string - Optional. Returns only the scheduled tasks with the given resource id.
++* `job_status`: string - Optional. Returns only the scheduled tasks matching the given status, one of
++ - "scheduled" - Task is scheduled but not active
++ - "active" - Task is active and probably running; if not, it will be run on the next scheduler loop
++ - "complete" - Task has completed successfully
++ - "failed" - Task has finished and either returned a failed status or raised an exception
++
++* `max_timestamp`: int - Optional. Returns only the scheduled tasks with a timestamp earlier than the specified one.
++
++**Response**
++
++The following fields are returned in the JSON response body along with a `200` HTTP status code:
++
++* `id`: string - ID of scheduled task.
++* `action`: string - The name of the scheduled task's action.
++* `status`: string - The status of the scheduled task.
++* `timestamp_ms`: integer - The timestamp (in milliseconds since the unix epoch) of the given task. If the status is "scheduled" then this represents when it should be launched.
++ Otherwise it represents the last time this task changed state.
++* `resource_id`: Optional string - The resource id of the scheduled task, if it has one.
++* `result`: Optional Json - Any result of the scheduled task, if given.
++* `error`: Optional string - If the task has the status "failed", the error associated with this failure.
+diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
+index 5977ded4a0..cf809d1a27 100644
+--- a/synapse/rest/admin/__init__.py
++++ b/synapse/rest/admin/__init__.py
+@@ -86,6 +86,7 @@ from synapse.rest.admin.rooms import (
+ RoomStateRestServlet,
+ RoomTimestampToEventRestServlet,
+ )
++from synapse.rest.admin.scheduled_tasks import ScheduledTasksRestServlet
+ from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
+ from synapse.rest.admin.statistics import (
+ LargestRoomsStatistics,
+@@ -338,6 +339,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ BackgroundUpdateStartJobRestServlet(hs).register(http_server)
+ ExperimentalFeaturesRestServlet(hs).register(http_server)
+ SuspendAccountRestServlet(hs).register(http_server)
++ ScheduledTasksRestServlet(hs).register(http_server)
+
+
+ def register_servlets_for_client_rest_resource(
+diff --git a/synapse/rest/admin/scheduled_tasks.py b/synapse/rest/admin/scheduled_tasks.py
+new file mode 100644
+index 0000000000..2ae13021b9
+--- /dev/null
++++ b/synapse/rest/admin/scheduled_tasks.py
+@@ -0,0 +1,70 @@
++#
++# This file is licensed under the Affero General Public License (AGPL) version 3.
++#
++# Copyright (C) 2025 New Vector, Ltd
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU Affero General Public License as
++# published by the Free Software Foundation, either version 3 of the
++# License, or (at your option) any later version.
++#
++# See the GNU Affero General Public License for more details:
++# <https://www.gnu.org/licenses/agpl-3.0.html>.
++#
++#
++#
++from typing import TYPE_CHECKING, Tuple
++
++from synapse.http.servlet import RestServlet, parse_integer, parse_string
++from synapse.http.site import SynapseRequest
++from synapse.rest.admin import admin_patterns, assert_requester_is_admin
++from synapse.types import JsonDict, TaskStatus
++
++if TYPE_CHECKING:
++ from synapse.server import HomeServer
++
++
++class ScheduledTasksRestServlet(RestServlet):
++ """Get a list of scheduled tasks and their statuses
++ optionally filtered by action name, resource id, status, and max timestamp
++ """
++
++ PATTERNS = admin_patterns("/scheduled_tasks$")
++
++ def __init__(self, hs: "HomeServer"):
++ self._auth = hs.get_auth()
++ self._store = hs.get_datastores().main
++
++ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
++ await assert_requester_is_admin(self._auth, request)
++
++ # extract query params
++ action_name = parse_string(request, "action_name")
++ resource_id = parse_string(request, "resource_id")
++ status = parse_string(request, "job_status")
++ max_timestamp = parse_integer(request, "max_timestamp")
++
++ actions = [action_name] if action_name else None
++ statuses = [TaskStatus(status)] if status else None
++
++ tasks = await self._store.get_scheduled_tasks(
++ actions=actions,
++ resource_id=resource_id,
++ statuses=statuses,
++ max_timestamp=max_timestamp,
++ )
++
++ json_tasks = []
++ for task in tasks:
++ result_task = {
++ "id": task.id,
++ "action": task.action,
++ "status": task.status,
++ "timestamp_ms": task.timestamp,
++ "resource_id": task.resource_id,
++ "result": task.result,
++ "error": task.error,
++ }
++ json_tasks.append(result_task)
++
++ return 200, {"scheduled_tasks": json_tasks}
+diff --git a/tests/rest/admin/test_scheduled_tasks.py b/tests/rest/admin/test_scheduled_tasks.py
+new file mode 100644
+index 0000000000..9654e9322b
+--- /dev/null
++++ b/tests/rest/admin/test_scheduled_tasks.py
+@@ -0,0 +1,192 @@
++#
++# This file is licensed under the Affero General Public License (AGPL) version 3.
++#
++# Copyright (C) 2025 New Vector, Ltd
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU Affero General Public License as
++# published by the Free Software Foundation, either version 3 of the
++# License, or (at your option) any later version.
++#
++# See the GNU Affero General Public License for more details:
++# <https://www.gnu.org/licenses/agpl-3.0.html>.
++#
++#
++#
++from typing import Mapping, Optional, Tuple
++
++from twisted.test.proto_helpers import MemoryReactor
++
++import synapse.rest.admin
++from synapse.api.errors import Codes
++from synapse.rest.client import login
++from synapse.server import HomeServer
++from synapse.types import JsonMapping, ScheduledTask, TaskStatus
++from synapse.util import Clock
++
++from tests import unittest
++
++
++class ScheduledTasksAdminApiTestCase(unittest.HomeserverTestCase):
++ servlets = [
++ synapse.rest.admin.register_servlets,
++ login.register_servlets,
++ ]
++
++ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
++ self.store = hs.get_datastores().main
++ self.admin_user = self.register_user("admin", "pass", admin=True)
++ self.admin_user_tok = self.login("admin", "pass")
++ self._task_scheduler = hs.get_task_scheduler()
++
++ # create and schedule a few tasks
++ async def _test_task(
++ task: ScheduledTask,
++ ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
++ return TaskStatus.ACTIVE, None, None
++
++ async def _finished_test_task(
++ task: ScheduledTask,
++ ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
++ return TaskStatus.COMPLETE, None, None
++
++ async def _failed_test_task(
++ task: ScheduledTask,
++ ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
++ return TaskStatus.FAILED, None, "Everything failed"
++
++ self._task_scheduler.register_action(_test_task, "test_task")
++ self.get_success(
++ self._task_scheduler.schedule_task("test_task", resource_id="test")
++ )
++
++ self._task_scheduler.register_action(_finished_test_task, "finished_test_task")
++ self.get_success(
++ self._task_scheduler.schedule_task(
++ "finished_test_task", resource_id="finished_task"
++ )
++ )
++
++ self._task_scheduler.register_action(_failed_test_task, "failed_test_task")
++ self.get_success(
++ self._task_scheduler.schedule_task(
++ "failed_test_task", resource_id="failed_task"
++ )
++ )
++
++ def check_scheduled_tasks_response(self, scheduled_tasks: Mapping) -> list:
++ result = []
++ for task in scheduled_tasks:
++ if task["resource_id"] == "test":
++ self.assertEqual(task["status"], TaskStatus.ACTIVE)
++ self.assertEqual(task["action"], "test_task")
++ result.append(task)
++ if task["resource_id"] == "finished_task":
++ self.assertEqual(task["status"], TaskStatus.COMPLETE)
++ self.assertEqual(task["action"], "finished_test_task")
++ result.append(task)
++ if task["resource_id"] == "failed_task":
++ self.assertEqual(task["status"], TaskStatus.FAILED)
++ self.assertEqual(task["action"], "failed_test_task")
++ result.append(task)
++
++ return result
++
++ def test_requester_is_not_admin(self) -> None:
++ """
++ If the user is not a server admin, an error 403 is returned.
++ """
++
++ self.register_user("user", "pass", admin=False)
++ other_user_tok = self.login("user", "pass")
++
++ channel = self.make_request(
++ "GET",
++ "/_synapse/admin/v1/scheduled_tasks",
++ content={},
++ access_token=other_user_tok,
++ )
++
++ self.assertEqual(403, channel.code, msg=channel.json_body)
++ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
++
++ def test_scheduled_tasks(self) -> None:
++ """
++ Test that endpoint returns scheduled tasks.
++ """
++
++ channel = self.make_request(
++ "GET",
++ "/_synapse/admin/v1/scheduled_tasks",
++ content={},
++ access_token=self.admin_user_tok,
++ )
++ self.assertEqual(200, channel.code, msg=channel.json_body)
++ scheduled_tasks = channel.json_body["scheduled_tasks"]
++
++ # make sure we got back all the scheduled tasks
++ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks)
++ self.assertEqual(len(found_tasks), 3)
++
++ def test_filtering_scheduled_tasks(self) -> None:
++ """
++ Test that filtering the scheduled tasks response via query params works as expected.
++ """
++ # filter via job_status
++ channel = self.make_request(
++ "GET",
++ "/_synapse/admin/v1/scheduled_tasks?job_status=active",
++ content={},
++ access_token=self.admin_user_tok,
++ )
++ self.assertEqual(200, channel.code, msg=channel.json_body)
++ scheduled_tasks = channel.json_body["scheduled_tasks"]
++ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks)
++
++ # only the active task should have been returned
++ self.assertEqual(len(found_tasks), 1)
++ self.assertEqual(found_tasks[0]["status"], "active")
++
++ # filter via action_name
++ channel = self.make_request(
++ "GET",
++ "/_synapse/admin/v1/scheduled_tasks?action_name=test_task",
++ content={},
++ access_token=self.admin_user_tok,
++ )
++ self.assertEqual(200, channel.code, msg=channel.json_body)
++ scheduled_tasks = channel.json_body["scheduled_tasks"]
++
++ # only test_task should have been returned
++ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks)
++ self.assertEqual(len(found_tasks), 1)
++ self.assertEqual(found_tasks[0]["action"], "test_task")
++
++ # filter via max_timestamp
++ channel = self.make_request(
++ "GET",
++ "/_synapse/admin/v1/scheduled_tasks?max_timestamp=0",
++ content={},
++ access_token=self.admin_user_tok,
++ )
++ self.assertEqual(200, channel.code, msg=channel.json_body)
++ scheduled_tasks = channel.json_body["scheduled_tasks"]
++ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks)
++
++ # none should have been returned
++ self.assertEqual(len(found_tasks), 0)
++
++ # filter via resource id
++ channel = self.make_request(
++ "GET",
++ "/_synapse/admin/v1/scheduled_tasks?resource_id=failed_task",
++ content={},
++ access_token=self.admin_user_tok,
++ )
++ self.assertEqual(200, channel.code, msg=channel.json_body)
++ scheduled_tasks = channel.json_body["scheduled_tasks"]
++ found_tasks = self.check_scheduled_tasks_response(scheduled_tasks)
++
++ # only the task with the matching resource id should have been returned
++ self.assertEqual(len(found_tasks), 1)
++ self.assertEqual(found_tasks[0]["resource_id"], "failed_task")
+--
+2.49.0
+
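As a usage sketch, the new endpoint is queried like any other Admin API route. The status filter is read from the `job_status` query parameter (as in the servlet and tests above); the hostname and token below are placeholders, and the `requests` package is assumed to be available:

```python
import requests

resp = requests.get(
    "https://synapse.example.org/_synapse/admin/v1/scheduled_tasks",
    params={"job_status": "failed"},
    headers={"Authorization": "Bearer <admin access token>"},
)
resp.raise_for_status()
for task in resp.json()["scheduled_tasks"]:
    print(task["id"], task["action"], task["timestamp_ms"], task["error"])
```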
diff --git a/packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch b/packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch
new file mode 100644
index 0000000..735681a
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0024-Readme-tweaks-18218.patch
@@ -0,0 +1,48 @@
+From f2ca2e31f7c5b1627554f85adead37212736bf5a Mon Sep 17 00:00:00 2001
+From: Andrew Ferrazzutti <andrewf@element.io>
+Date: Fri, 2 May 2025 06:11:48 -0400
+Subject: [PATCH 24/74] Readme tweaks (#18218)
+
+---
+ README.rst | 12 +++++++-----
+ changelog.d/18218.doc | 1 +
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+ create mode 100644 changelog.d/18218.doc
+
+diff --git a/README.rst b/README.rst
+index 77f861e788..8974990ed1 100644
+--- a/README.rst
++++ b/README.rst
+@@ -253,15 +253,17 @@ Alongside all that, join our developer community on Matrix:
+ Copyright and Licensing
+ =======================
+
+-Copyright 2014-2017 OpenMarket Ltd
+-Copyright 2017 Vector Creations Ltd
+-Copyright 2017-2025 New Vector Ltd
++| Copyright 2014-2017 OpenMarket Ltd
++| Copyright 2017 Vector Creations Ltd
++| Copyright 2017-2025 New Vector Ltd
++|
+
+ This software is dual-licensed by New Vector Ltd (Element). It can be used either:
+-
++
+ (1) for free under the terms of the GNU Affero General Public License (as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version); OR
+-
++
+ (2) under the terms of a paid-for Element Commercial License agreement between you and Element (the terms of which may vary depending on what you and Element have agreed to).
++
+ Unless required by applicable law or agreed to in writing, software distributed under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the specific language governing permissions and limitations under the Licenses.
+
+
+diff --git a/changelog.d/18218.doc b/changelog.d/18218.doc
+new file mode 100644
+index 0000000000..f62da6a0b9
+--- /dev/null
++++ b/changelog.d/18218.doc
+@@ -0,0 +1 @@
++Improve formatting of the README file.
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch b/packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch
new file mode 100644
index 0000000..892a43f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0025-Do-not-auto-provision-missing-users-devices-when-del.patch
@@ -0,0 +1,129 @@
+From 74be5cfdbc2208f0b34d9ab75f99994bd8ed217d Mon Sep 17 00:00:00 2001
+From: Quentin Gliech <quenting@element.io>
+Date: Fri, 2 May 2025 12:13:26 +0200
+Subject: [PATCH 25/74] Do not auto-provision missing users & devices when
+ delegating auth to MAS (#18181)
+
+Since MAS 0.13.0, the provisioning of devices and users is done
+synchronously and reliably enough that we don't need to auto-provision
+on the Synapse side anymore.
+
+It's important to remove this behaviour if we want to start caching
+token introspection results.
+---
+ changelog.d/18181.misc | 1 +
+ synapse/api/auth/msc3861_delegated.py | 39 +++++++------------------
+ tests/handlers/test_oauth_delegation.py | 10 +++++++
+ 3 files changed, 22 insertions(+), 28 deletions(-)
+ create mode 100644 changelog.d/18181.misc
+
+diff --git a/changelog.d/18181.misc b/changelog.d/18181.misc
+new file mode 100644
+index 0000000000..d9ba2f1dd1
+--- /dev/null
++++ b/changelog.d/18181.misc
+@@ -0,0 +1 @@
++Stop auto-provisioning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later.
+diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
+index 9ded3366e3..e500a06afe 100644
+--- a/synapse/api/auth/msc3861_delegated.py
++++ b/synapse/api/auth/msc3861_delegated.py
+@@ -39,7 +39,6 @@ from synapse.api.errors import (
+ HttpResponseException,
+ InvalidClientTokenError,
+ OAuthInsufficientScopeError,
+- StoreError,
+ SynapseError,
+ UnrecognizedRequestError,
+ )
+@@ -512,7 +511,7 @@ class MSC3861DelegatedAuth(BaseAuth):
+ raise InvalidClientTokenError("No scope in token granting user rights")
+
+ # Match via the sub claim
+- sub: Optional[str] = introspection_result.get_sub()
++ sub = introspection_result.get_sub()
+ if sub is None:
+ raise InvalidClientTokenError(
+ "Invalid sub claim in the introspection result"
+@@ -525,29 +524,20 @@ class MSC3861DelegatedAuth(BaseAuth):
+ # If we could not find a user via the external_id, it either does not exist,
+ # or the external_id was never recorded
+
+- # TODO: claim mapping should be configurable
+- username: Optional[str] = introspection_result.get_username()
+- if username is None or not isinstance(username, str):
++ username = introspection_result.get_username()
++ if username is None:
+ raise AuthError(
+ 500,
+ "Invalid username claim in the introspection result",
+ )
+ user_id = UserID(username, self._hostname)
+
+- # First try to find a user from the username claim
++ # Try to find a user from the username claim
+ user_info = await self.store.get_user_by_id(user_id=user_id.to_string())
+ if user_info is None:
+- # If the user does not exist, we should create it on the fly
+- # TODO: we could use SCIM to provision users ahead of time and listen
+- # for SCIM SET events if those ever become standard:
+- # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
+-
+- # TODO: claim mapping should be configurable
+- # If present, use the name claim as the displayname
+- name: Optional[str] = introspection_result.get_name()
+-
+- await self.store.register_user(
+- user_id=user_id.to_string(), create_profile_with_displayname=name
++ raise AuthError(
++ 500,
++ "User not found",
+ )
+
+ # And record the sub as external_id
+@@ -587,17 +577,10 @@ class MSC3861DelegatedAuth(BaseAuth):
+ "Invalid device ID in introspection result",
+ )
+
+- # Create the device on the fly if it does not exist
+- try:
+- await self.store.get_device(
+- user_id=user_id.to_string(), device_id=device_id
+- )
+- except StoreError:
+- await self.store.store_device(
+- user_id=user_id.to_string(),
+- device_id=device_id,
+- initial_device_display_name="OIDC-native client",
+- )
++ # Make sure the device exists
++ await self.store.get_device(
++ user_id=user_id.to_string(), device_id=device_id
++ )
+
+ # TODO: there is a few things missing in the requester here, which still need
+ # to be figured out, like:
+diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
+index 034a1594d9..934bfee0bc 100644
+--- a/tests/handlers/test_oauth_delegation.py
++++ b/tests/handlers/test_oauth_delegation.py
+@@ -147,6 +147,16 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
+
+ return hs
+
++ def prepare(
++ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
++ ) -> None:
++ # Provision the user and the device we use in the tests.
++ store = homeserver.get_datastores().main
++ self.get_success(store.register_user(USER_ID))
++ self.get_success(
++ store.store_device(USER_ID, DEVICE, initial_device_display_name=None)
++ )
++
+ def _assertParams(self) -> None:
+ """Assert that the request parameters are correct."""
+ params = parse_qs(self.http_client.request.call_args[1]["data"].decode("utf-8"))
+--
+2.49.0
+
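Behaviourally, the patch reduces the user lookup to: fetch, and fail hard on a miss instead of registering on the fly. A condensed sketch of the new control flow (store access simplified; `AuthError` is the same exception the patched handler raises):

```python
from synapse.api.errors import AuthError

async def resolve_user(store, user_id: str):
    # MAS >= 0.13.0 provisions users and devices synchronously, so a
    # missing row is a genuine error rather than a cue to auto-provision.
    user_info = await store.get_user_by_id(user_id=user_id)
    if user_info is None:
        raise AuthError(500, "User not found")
    return user_info
```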
diff --git a/packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch b/packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch
new file mode 100644
index 0000000..4635d35
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0026-Fix-typo-in-doc-for-Scheduled-Tasks-Admin-API-18384.patch
@@ -0,0 +1,34 @@
+From ea376126a0b7e3fbc0df6ac827eba87d98e479de Mon Sep 17 00:00:00 2001
+From: Shay <hillerys@element.io>
+Date: Fri, 2 May 2025 04:14:31 -0700
+Subject: [PATCH 26/74] Fix typo in doc for Scheduled Tasks Admin API (#18384)
+
+---
+ changelog.d/18384.doc | 1 +
+ docs/admin_api/scheduled_tasks.md | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18384.doc
+
+diff --git a/changelog.d/18384.doc b/changelog.d/18384.doc
+new file mode 100644
+index 0000000000..ebcd029639
+--- /dev/null
++++ b/changelog.d/18384.doc
+@@ -0,0 +1 @@
++Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks.
+diff --git a/docs/admin_api/scheduled_tasks.md b/docs/admin_api/scheduled_tasks.md
+index 1708871a6d..b80da5083c 100644
+--- a/docs/admin_api/scheduled_tasks.md
++++ b/docs/admin_api/scheduled_tasks.md
+@@ -19,7 +19,7 @@ It returns a JSON body like the following:
+ "id": "GSA124oegf1",
+ "action": "shutdown_room",
+ "status": "complete",
+- "timestamp": 23423523,
++ "timestamp_ms": 23423523,
+ "resource_id": "!roomid",
+ "result": "some result",
+ "error": null
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch b/packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch
new file mode 100644
index 0000000..53b5462
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0027-Don-t-check-the-at_hash-access-token-hash-in-OIDC-ID.patch
@@ -0,0 +1,177 @@
+From fd5d3d852df9dbbac13b406144be7ec5a807078d Mon Sep 17 00:00:00 2001
+From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+Date: Fri, 2 May 2025 12:16:14 +0100
+Subject: [PATCH 27/74] Don't check the `at_hash` (access token hash) in OIDC
+ ID Tokens if we don't use the access token (#18374)
+
+Co-authored-by: Eric Eastwood <erice@element.io>
+---
+ changelog.d/18374.misc | 1 +
+ synapse/handlers/oidc.py | 29 ++++++++++++++++++++++--
+ tests/handlers/test_oidc.py | 44 +++++++++++++++++++++++++++++++++++++
+ tests/test_utils/oidc.py | 19 ++++++++++++++--
+ 4 files changed, 89 insertions(+), 4 deletions(-)
+ create mode 100644 changelog.d/18374.misc
+
+diff --git a/changelog.d/18374.misc b/changelog.d/18374.misc
+new file mode 100644
+index 0000000000..a8efca68d0
+--- /dev/null
++++ b/changelog.d/18374.misc
+@@ -0,0 +1 @@
++Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token.
+\ No newline at end of file
+diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
+index c4cf0636a3..fb759172b3 100644
+--- a/synapse/handlers/oidc.py
++++ b/synapse/handlers/oidc.py
+@@ -586,6 +586,24 @@ class OidcProvider:
+ or self._user_profile_method == "userinfo_endpoint"
+ )
+
++ @property
++ def _uses_access_token(self) -> bool:
++ """Return True if the `access_token` will be used during the login process.
++
++ This is useful to determine whether the access token
++ returned by the identity provider, and
++ any related metadata (such as the `at_hash` field in
++ the ID token), should be validated.
++ """
++ # Currently, Synapse only uses the access_token to fetch user metadata
++ # from the userinfo endpoint. Therefore we only have a single criteria
++ # to check right now but this may change in the future and this function
++ # should be updated if more usages are introduced.
++ #
++ # For example, if we start to use the access_token given to us by the
++ # IdP for more things, such as accessing Resource Server APIs.
++ return self._uses_userinfo
++
+ @property
+ def issuer(self) -> str:
+ """The issuer identifying this provider."""
+@@ -957,9 +975,16 @@ class OidcProvider:
+ "nonce": nonce,
+ "client_id": self._client_auth.client_id,
+ }
+- if "access_token" in token:
++ if self._uses_access_token and "access_token" in token:
+ # If we got an `access_token`, there should be an `at_hash` claim
+- # in the `id_token` that we can check against.
++ # in the `id_token` that we can check against. Setting this
++ # instructs authlib to check the value of `at_hash` in the
++ # ID token.
++ #
++ # We only need to verify the access token if we actually make
++ # use of it. Which currently only happens when we need to fetch
++ # the user's information from the userinfo_endpoint. Thus, this
++ # check is also gated on self._uses_userinfo.
+ claims_params["access_token"] = token["access_token"]
+
+ claims_options = {"iss": {"values": [metadata["issuer"]]}}
+diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
+index a7cead83d0..e5f31d57ca 100644
+--- a/tests/handlers/test_oidc.py
++++ b/tests/handlers/test_oidc.py
+@@ -1029,6 +1029,50 @@ class OidcHandlerTestCase(HomeserverTestCase):
+ args = parse_qs(kwargs["data"].decode("utf-8"))
+ self.assertEqual(args["redirect_uri"], [TEST_REDIRECT_URI])
+
++ @override_config(
++ {
++ "oidc_config": {
++ **DEFAULT_CONFIG,
++ "redirect_uri": TEST_REDIRECT_URI,
++ }
++ }
++ )
++ def test_code_exchange_ignores_access_token(self) -> None:
++ """
++ Code exchange completes successfully and doesn't validate the `at_hash`
++ (access token hash) field of an ID token when the access token isn't
++ going to be used.
++
++ The access token won't be used in this test because Synapse (currently)
++ only needs it to fetch a user's metadata if it isn't included in the ID
++ token itself.
++
++ Because we have included "openid" in the requested scopes for this IdP
++ (see `SCOPES`), user metadata is included in the ID token. Thus the
++ access token isn't needed, and it's unnecessary for Synapse to validate
++ the access token.
++
++ This is a regression test for a situation where an upstream identity
++ provider was providing an invalid `at_hash` value, which Synapse errored
++ on, yet Synapse wasn't using the access token for anything.
++ """
++ # Exchange the code against the fake IdP.
++ userinfo = {
++ "sub": "foo",
++ "username": "foo",
++ "phone": "1234567",
++ }
++ with self.fake_server.id_token_override(
++ {
++ "at_hash": "invalid-hash",
++ }
++ ):
++ request, _ = self.start_authorization(userinfo)
++ self.get_success(self.handler.handle_oidc_callback(request))
++
++ # If no error was rendered, then we have success.
++ self.render_error.assert_not_called()
++
+ @override_config(
+ {
+ "oidc_config": {
+diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py
+index 6c4be1c1f8..5bf5e5cb0c 100644
+--- a/tests/test_utils/oidc.py
++++ b/tests/test_utils/oidc.py
+@@ -20,7 +20,9 @@
+ #
+
+
++import base64
+ import json
++from hashlib import sha256
+ from typing import Any, ContextManager, Dict, List, Optional, Tuple
+ from unittest.mock import Mock, patch
+ from urllib.parse import parse_qs
+@@ -154,10 +156,23 @@ class FakeOidcServer:
+ json_payload = json.dumps(payload)
+ return jws.serialize_compact(protected, json_payload, self._key).decode("utf-8")
+
+- def generate_id_token(self, grant: FakeAuthorizationGrant) -> str:
++ def generate_id_token(
++ self, grant: FakeAuthorizationGrant, access_token: str
++ ) -> str:
++ # Generate a hash of the access token for the optional
++ # `at_hash` field in an ID Token.
++ #
++ # 3.1.3.6. ID Token, https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
++ at_hash = (
++ base64.urlsafe_b64encode(sha256(access_token.encode("ascii")).digest()[:16])
++ .rstrip(b"=")
++ .decode("ascii")
++ )
++
+ now = int(self._clock.time())
+ id_token = {
+ **grant.userinfo,
++ "at_hash": at_hash,
+ "iss": self.issuer,
+ "aud": grant.client_id,
+ "iat": now,
+@@ -243,7 +258,7 @@ class FakeOidcServer:
+ }
+
+ if "openid" in grant.scope:
+- token["id_token"] = self.generate_id_token(grant)
++ token["id_token"] = self.generate_id_token(grant, access_token)
+
+ return dict(token)
+
+--
+2.49.0
+
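The `at_hash` computation used by the fake IdP above follows OpenID Connect Core 1.0 section 3.1.3.6: base64url-encode the left-most half of the access token's SHA-256 digest, dropping the padding. Extracted as a standalone helper:

```python
import base64
from hashlib import sha256

def at_hash(access_token: str) -> str:
    # Left-most 128 bits of SHA-256(access_token), base64url without padding.
    digest = sha256(access_token.encode("ascii")).digest()
    return base64.urlsafe_b64encode(digest[:16]).rstrip(b"=").decode("ascii")

print(at_hash("example-access-token"))
```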
diff --git a/packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch b/packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch
new file mode 100644
index 0000000..63ea40e
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0028-Fix-lint-which-broke-in-18374-18385.patch
@@ -0,0 +1,37 @@
+From d18edf67d6f444c8dfa6a46e8769bbfa8d22f57b Mon Sep 17 00:00:00 2001
+From: Quentin Gliech <quenting@element.io>
+Date: Fri, 2 May 2025 14:07:23 +0200
+Subject: [PATCH 28/74] Fix lint which broke in #18374 (#18385)
+
+https://github.com/element-hq/synapse/pull/18374 did not pass linting
+but was merged
+---
+ changelog.d/18385.misc | 1 +
+ synapse/handlers/oidc.py | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18385.misc
+
+diff --git a/changelog.d/18385.misc b/changelog.d/18385.misc
+new file mode 100644
+index 0000000000..a8efca68d0
+--- /dev/null
++++ b/changelog.d/18385.misc
+@@ -0,0 +1 @@
++Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token.
+\ No newline at end of file
+diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
+index fb759172b3..acf2d4bc8b 100644
+--- a/synapse/handlers/oidc.py
++++ b/synapse/handlers/oidc.py
+@@ -599,7 +599,7 @@ class OidcProvider:
+ # from the userinfo endpoint. Therefore we only have a single criteria
+ # to check right now but this may change in the future and this function
+ # should be updated if more usages are introduced.
+- #
++ #
+ # For example, if we start to use the access_token given to us by the
+ # IdP for more things, such as accessing Resource Server APIs.
+ return self._uses_userinfo
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch b/packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch
new file mode 100644
index 0000000..ddc4c02
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0029-Apply-should_drop_federated_event-to-federation-invi.patch
@@ -0,0 +1,54 @@
+From 411d239db47158cc14f94c94a86a5c713d783821 Mon Sep 17 00:00:00 2001
+From: Shay <hillerys@element.io>
+Date: Fri, 2 May 2025 06:04:01 -0700
+Subject: [PATCH 29/74] Apply `should_drop_federated_event` to federation
+ invites (#18330)
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18330.misc | 1 +
+ docs/modules/spam_checker_callbacks.md | 2 ++
+ synapse/federation/federation_server.py | 6 ++++++
+ 3 files changed, 9 insertions(+)
+ create mode 100644 changelog.d/18330.misc
+
+diff --git a/changelog.d/18330.misc b/changelog.d/18330.misc
+new file mode 100644
+index 0000000000..dcf341fa34
+--- /dev/null
++++ b/changelog.d/18330.misc
+@@ -0,0 +1 @@
++Apply `should_drop_federated_event` to federation invites.
+diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
+index c7f8606fd0..063099a127 100644
+--- a/docs/modules/spam_checker_callbacks.md
++++ b/docs/modules/spam_checker_callbacks.md
+@@ -353,6 +353,8 @@ callback returns `False`, Synapse falls through to the next one. The value of th
+ callback that does not return `False` will be used. If this happens, Synapse will not call
+ any of the subsequent implementations of this callback.
+
++Note that this check is applied to federation invites as of Synapse v1.130.0.
++
+
+ ### `check_login_for_spam`
+
+diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
+index f9e97ea13e..2f2c78babc 100644
+--- a/synapse/federation/federation_server.py
++++ b/synapse/federation/federation_server.py
+@@ -701,6 +701,12 @@ class FederationServer(FederationBase):
+ pdu = event_from_pdu_json(content, room_version)
+ origin_host, _ = parse_server_name(origin)
+ await self.check_server_matches_acl(origin_host, pdu.room_id)
++ if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu):
++ logger.info(
++ "Federated event contains spam, dropping %s",
++ pdu.event_id,
++ )
++ raise SynapseError(403, Codes.FORBIDDEN)
+ try:
+ pdu = await self._check_sigs_and_hash(room_version, pdu)
+ except InvalidEventSignatureError as e:
+--
+2.49.0
+
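For module authors, `should_drop_federated_event` is a regular spam-checker callback registered through the module API; with this patch it is also consulted for invites received over federation. A minimal module sketch (the sender-domain check is an arbitrary example policy):

```python
class DropFederatedSpam:
    def __init__(self, config: dict, api):
        # `api` is the ModuleApi instance Synapse hands to modules.
        api.register_spam_checker_callbacks(
            should_drop_federated_event=self.should_drop_federated_event,
        )

    async def should_drop_federated_event(self, event) -> bool:
        # Returning True silently drops the PDU; since this patch that
        # now also covers federation invites.
        return event.sender.endswith(":spam.example.org")
```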
diff --git a/packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch b/packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch
new file mode 100644
index 0000000..1aecab2
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0030-Allow-a-few-admin-APIs-used-by-MAS-to-run-on-workers.patch
@@ -0,0 +1,699 @@
+From b8146d4b03d89a9407125b5934bd7accbe0680e0 Mon Sep 17 00:00:00 2001
+From: Quentin Gliech <quenting@element.io>
+Date: Fri, 2 May 2025 15:37:58 +0200
+Subject: [PATCH 30/74] Allow a few admin APIs used by MAS to run on workers
+ (#18313)
+
+This should be reviewed commit by commit.
+
+It makes a few admin servlets that are used by MAS in delegation mode
+available on workers.
+
+---------
+
+Co-authored-by: Olivier 'reivilibre <oliverw@matrix.org>
+Co-authored-by: Devon Hudson <devon.dmytro@gmail.com>
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18313.misc | 1 +
+ docs/workers.md | 9 +
+ synapse/app/generic_worker.py | 18 +-
+ synapse/app/homeserver.py | 9 +-
+ synapse/handlers/set_password.py | 18 +-
+ synapse/rest/__init__.py | 1 -
+ synapse/rest/admin/__init__.py | 29 +-
+ synapse/rest/admin/devices.py | 26 +-
+ .../storage/databases/main/end_to_end_keys.py | 78 +++---
+ .../storage/databases/main/registration.py | 260 +++++++++---------
+ 10 files changed, 249 insertions(+), 200 deletions(-)
+ create mode 100644 changelog.d/18313.misc
+
+diff --git a/changelog.d/18313.misc b/changelog.d/18313.misc
+new file mode 100644
+index 0000000000..febf3ac06e
+--- /dev/null
++++ b/changelog.d/18313.misc
+@@ -0,0 +1 @@
++Allow a few admin APIs used by matrix-authentication-service to run on workers.
+diff --git a/docs/workers.md b/docs/workers.md
+index 2597e78217..45a00696f3 100644
+--- a/docs/workers.md
++++ b/docs/workers.md
+@@ -323,6 +323,15 @@ For multiple workers not handling the SSO endpoints properly, see
+ [#7530](https://github.com/matrix-org/synapse/issues/7530) and
+ [#9427](https://github.com/matrix-org/synapse/issues/9427).
+
++Additionally, when MSC3861 is enabled (`experimental_features.msc3861.enabled`
++set to `true`), the following endpoints can be handled by the worker:
++
++ ^/_synapse/admin/v2/users/[^/]+$
++ ^/_synapse/admin/v1/username_available$
++ ^/_synapse/admin/v1/users/[^/]+/_allow_cross_signing_replacement_without_uia$
++ # Only the GET method:
++ ^/_synapse/admin/v1/users/[^/]+/devices$
++
+ Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
+ with `client` and `federation` `resources` must be configured in the
+ [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
+diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
+index e4120ed424..f495d5b7e4 100644
+--- a/synapse/app/generic_worker.py
++++ b/synapse/app/generic_worker.py
+@@ -51,8 +51,7 @@ from synapse.http.server import JsonResource, OptionsResource
+ from synapse.logging.context import LoggingContext
+ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
+-from synapse.rest import ClientRestResource
+-from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo
++from synapse.rest import ClientRestResource, admin
+ from synapse.rest.health import HealthResource
+ from synapse.rest.key.v2 import KeyResource
+ from synapse.rest.synapse.client import build_synapse_client_resource_tree
+@@ -176,8 +175,13 @@ class GenericWorkerServer(HomeServer):
+ def _listen_http(self, listener_config: ListenerConfig) -> None:
+ assert listener_config.http_options is not None
+
+- # We always include a health resource.
+- resources: Dict[str, Resource] = {"/health": HealthResource()}
++ # We always include an admin resource that we populate with servlets as needed
++ admin_resource = JsonResource(self, canonical_json=False)
++ resources: Dict[str, Resource] = {
++ # We always include a health resource.
++ "/health": HealthResource(),
++ "/_synapse/admin": admin_resource,
++ }
+
+ for res in listener_config.http_options.resources:
+ for name in res.names:
+@@ -190,7 +194,7 @@ class GenericWorkerServer(HomeServer):
+
+ resources.update(build_synapse_client_resource_tree(self))
+ resources["/.well-known"] = well_known_resource(self)
+- resources["/_synapse/admin"] = AdminRestResource(self)
++ admin.register_servlets(self, admin_resource)
+
+ elif name == "federation":
+ resources[FEDERATION_PREFIX] = TransportLayerServer(self)
+@@ -200,15 +204,13 @@ class GenericWorkerServer(HomeServer):
+
+ # We need to serve the admin servlets for media on the
+ # worker.
+- admin_resource = JsonResource(self, canonical_json=False)
+- register_servlets_for_media_repo(self, admin_resource)
++ admin.register_servlets_for_media_repo(self, admin_resource)
+
+ resources.update(
+ {
+ MEDIA_R0_PREFIX: media_repo,
+ MEDIA_V3_PREFIX: media_repo,
+ LEGACY_MEDIA_PREFIX: media_repo,
+- "/_synapse/admin": admin_resource,
+ }
+ )
+
+diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
+index 2a824e8457..6da2194cf7 100644
+--- a/synapse/app/homeserver.py
++++ b/synapse/app/homeserver.py
+@@ -54,6 +54,7 @@ from synapse.config.server import ListenerConfig, TCPListenerConfig
+ from synapse.federation.transport.server import TransportLayerServer
+ from synapse.http.additional_resource import AdditionalResource
+ from synapse.http.server import (
++ JsonResource,
+ OptionsResource,
+ RootOptionsRedirectResource,
+ StaticResource,
+@@ -61,8 +62,7 @@ from synapse.http.server import (
+ from synapse.logging.context import LoggingContext
+ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
+-from synapse.rest import ClientRestResource
+-from synapse.rest.admin import AdminRestResource
++from synapse.rest import ClientRestResource, admin
+ from synapse.rest.health import HealthResource
+ from synapse.rest.key.v2 import KeyResource
+ from synapse.rest.synapse.client import build_synapse_client_resource_tree
+@@ -180,11 +180,14 @@ class SynapseHomeServer(HomeServer):
+ if compress:
+ client_resource = gz_wrap(client_resource)
+
++ admin_resource = JsonResource(self, canonical_json=False)
++ admin.register_servlets(self, admin_resource)
++
+ resources.update(
+ {
+ CLIENT_API_PREFIX: client_resource,
+ "/.well-known": well_known_resource(self),
+- "/_synapse/admin": AdminRestResource(self),
++ "/_synapse/admin": admin_resource,
+ **build_synapse_client_resource_tree(self),
+ }
+ )
+diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
+index 29cc03d71d..94301add9e 100644
+--- a/synapse/handlers/set_password.py
++++ b/synapse/handlers/set_password.py
+@@ -36,10 +36,17 @@ class SetPasswordHandler:
+ def __init__(self, hs: "HomeServer"):
+ self.store = hs.get_datastores().main
+ self._auth_handler = hs.get_auth_handler()
+- # This can only be instantiated on the main process.
+- device_handler = hs.get_device_handler()
+- assert isinstance(device_handler, DeviceHandler)
+- self._device_handler = device_handler
++
++ # We don't need the device handler if password changing is disabled.
++ # This allows us to instantiate the SetPasswordHandler on the workers
++ # that have admin APIs for MAS
++ if self._auth_handler.can_change_password():
++ # This can only be instantiated on the main process.
++ device_handler = hs.get_device_handler()
++ assert isinstance(device_handler, DeviceHandler)
++ self._device_handler: Optional[DeviceHandler] = device_handler
++ else:
++ self._device_handler = None
+
+ async def set_password(
+ self,
+@@ -51,6 +58,9 @@ class SetPasswordHandler:
+ if not self._auth_handler.can_change_password():
+ raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN)
+
++ # We should have this available only if password changing is enabled.
++ assert self._device_handler is not None
++
+ try:
+ await self.store.user_set_password_hash(user_id, password_hash)
+ except StoreError as e:
+diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
+index 2f1ef84e26..00f108de08 100644
+--- a/synapse/rest/__init__.py
++++ b/synapse/rest/__init__.py
+@@ -187,7 +187,6 @@ class ClientRestResource(JsonResource):
+ mutual_rooms.register_servlets,
+ login_token_request.register_servlets,
+ rendezvous.register_servlets,
+- auth_metadata.register_servlets,
+ ]:
+ continue
+
+diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
+index cf809d1a27..b1335fed66 100644
+--- a/synapse/rest/admin/__init__.py
++++ b/synapse/rest/admin/__init__.py
+@@ -39,7 +39,7 @@ from typing import TYPE_CHECKING, Optional, Tuple
+
+ from synapse.api.errors import Codes, NotFoundError, SynapseError
+ from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME
+-from synapse.http.server import HttpServer, JsonResource
++from synapse.http.server import HttpServer
+ from synapse.http.servlet import RestServlet, parse_json_object_from_request
+ from synapse.http.site import SynapseRequest
+ from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
+@@ -51,6 +51,7 @@ from synapse.rest.admin.background_updates import (
+ from synapse.rest.admin.devices import (
+ DeleteDevicesRestServlet,
+ DeviceRestServlet,
++ DevicesGetRestServlet,
+ DevicesRestServlet,
+ )
+ from synapse.rest.admin.event_reports import (
+@@ -264,14 +265,6 @@ class PurgeHistoryStatusRestServlet(RestServlet):
+ ########################################################################################
+
+
+-class AdminRestResource(JsonResource):
+- """The REST resource which gets mounted at /_synapse/admin"""
+-
+- def __init__(self, hs: "HomeServer"):
+- JsonResource.__init__(self, hs, canonical_json=False)
+- register_servlets(hs, self)
+-
+-
+ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ """
+ Register all the admin servlets.
+@@ -280,6 +273,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+
+ # Admin servlets below may not work on workers.
+ if hs.config.worker.worker_app is not None:
++ # Some admin servlets can be mounted on workers when MSC3861 is enabled.
++ if hs.config.experimental.msc3861.enabled:
++ register_servlets_for_msc3861_delegation(hs, http_server)
++
+ return
+
+ register_servlets_for_client_rest_resource(hs, http_server)
+@@ -367,4 +364,16 @@ def register_servlets_for_client_rest_resource(
+ ListMediaInRoom(hs).register(http_server)
+
+ # don't add more things here: new servlets should only be exposed on
+- # /_synapse/admin so should not go here. Instead register them in AdminRestResource.
++ # /_synapse/admin so should not go here. Instead register them in register_servlets.
++
++
++def register_servlets_for_msc3861_delegation(
++ hs: "HomeServer", http_server: HttpServer
++) -> None:
++ """Register servlets needed by MAS when MSC3861 is enabled"""
++ assert hs.config.experimental.msc3861.enabled
++
++ UserRestServletV2(hs).register(http_server)
++ UsernameAvailableRestServlet(hs).register(http_server)
++ UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server)
++ DevicesGetRestServlet(hs).register(http_server)
+diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
+index 449b066923..125ed8c491 100644
+--- a/synapse/rest/admin/devices.py
++++ b/synapse/rest/admin/devices.py
+@@ -113,18 +113,19 @@ class DeviceRestServlet(RestServlet):
+ return HTTPStatus.OK, {}
+
+
+-class DevicesRestServlet(RestServlet):
++class DevicesGetRestServlet(RestServlet):
+ """
+ Retrieve the given user's devices
++
++ This can be mounted on workers as it is read-only, as opposed
++ to `DevicesRestServlet`.
+ """
+
+ PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2")
+
+ def __init__(self, hs: "HomeServer"):
+ self.auth = hs.get_auth()
+- handler = hs.get_device_handler()
+- assert isinstance(handler, DeviceHandler)
+- self.device_handler = handler
++ self.device_worker_handler = hs.get_device_handler()
+ self.store = hs.get_datastores().main
+ self.is_mine = hs.is_mine
+
+@@ -141,9 +142,24 @@ class DevicesRestServlet(RestServlet):
+ if u is None:
+ raise NotFoundError("Unknown user")
+
+- devices = await self.device_handler.get_devices_by_user(target_user.to_string())
++ devices = await self.device_worker_handler.get_devices_by_user(
++ target_user.to_string()
++ )
+ return HTTPStatus.OK, {"devices": devices, "total": len(devices)}
+
++
++class DevicesRestServlet(DevicesGetRestServlet):
++ """
++ Retrieve the given user's devices
++ """
++
++ PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2")
++
++ def __init__(self, hs: "HomeServer"):
++ super().__init__(hs)
++ assert isinstance(self.device_worker_handler, DeviceHandler)
++ self.device_handler = self.device_worker_handler
++
+ async def on_POST(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
+diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
+index b4c7069958..341e7014d6 100644
+--- a/synapse/storage/databases/main/end_to_end_keys.py
++++ b/synapse/storage/databases/main/end_to_end_keys.py
+@@ -1501,6 +1501,45 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
+ "delete_old_otks_for_next_user_batch", impl
+ )
+
++ async def allow_master_cross_signing_key_replacement_without_uia(
++ self, user_id: str, duration_ms: int
++ ) -> Optional[int]:
++ """Mark this user's latest master key as being replaceable without UIA.
++
++ Said replacement will only be permitted for a short time after calling this
++ function. That time period is controlled by the duration argument.
++
++ Returns:
++ None, if there is no such key.
++ Otherwise, the timestamp before which replacement is allowed without UIA.
++ """
++ timestamp = self._clock.time_msec() + duration_ms
++
++ def impl(txn: LoggingTransaction) -> Optional[int]:
++ txn.execute(
++ """
++ UPDATE e2e_cross_signing_keys
++ SET updatable_without_uia_before_ms = ?
++ WHERE stream_id = (
++ SELECT stream_id
++ FROM e2e_cross_signing_keys
++ WHERE user_id = ? AND keytype = 'master'
++ ORDER BY stream_id DESC
++ LIMIT 1
++ )
++ """,
++ (timestamp, user_id),
++ )
++ if txn.rowcount == 0:
++ return None
++
++ return timestamp
++
++ return await self.db_pool.runInteraction(
++ "allow_master_cross_signing_key_replacement_without_uia",
++ impl,
++ )
++
+
+ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
+ def __init__(
+@@ -1755,42 +1794,3 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
+ ],
+ desc="add_e2e_signing_key",
+ )
+-
+- async def allow_master_cross_signing_key_replacement_without_uia(
+- self, user_id: str, duration_ms: int
+- ) -> Optional[int]:
+- """Mark this user's latest master key as being replaceable without UIA.
+-
+- Said replacement will only be permitted for a short time after calling this
+- function. That time period is controlled by the duration argument.
+-
+- Returns:
+- None, if there is no such key.
+- Otherwise, the timestamp before which replacement is allowed without UIA.
+- """
+- timestamp = self._clock.time_msec() + duration_ms
+-
+- def impl(txn: LoggingTransaction) -> Optional[int]:
+- txn.execute(
+- """
+- UPDATE e2e_cross_signing_keys
+- SET updatable_without_uia_before_ms = ?
+- WHERE stream_id = (
+- SELECT stream_id
+- FROM e2e_cross_signing_keys
+- WHERE user_id = ? AND keytype = 'master'
+- ORDER BY stream_id DESC
+- LIMIT 1
+- )
+- """,
+- (timestamp, user_id),
+- )
+- if txn.rowcount == 0:
+- return None
+-
+- return timestamp
+-
+- return await self.db_pool.runInteraction(
+- "allow_master_cross_signing_key_replacement_without_uia",
+- impl,
+- )
+diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
+index c43f31353b..1aeae951c5 100644
+--- a/synapse/storage/databases/main/registration.py
++++ b/synapse/storage/databases/main/registration.py
+@@ -2105,6 +2105,136 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
+ func=is_user_approved_txn,
+ )
+
++ async def set_user_deactivated_status(
++ self, user_id: str, deactivated: bool
++ ) -> None:
++ """Set the `deactivated` property for the provided user to the provided value.
++
++ Args:
++ user_id: The ID of the user to set the status for.
++ deactivated: The value to set for `deactivated`.
++ """
++
++ await self.db_pool.runInteraction(
++ "set_user_deactivated_status",
++ self.set_user_deactivated_status_txn,
++ user_id,
++ deactivated,
++ )
++
++ def set_user_deactivated_status_txn(
++ self, txn: LoggingTransaction, user_id: str, deactivated: bool
++ ) -> None:
++ self.db_pool.simple_update_one_txn(
++ txn=txn,
++ table="users",
++ keyvalues={"name": user_id},
++ updatevalues={"deactivated": 1 if deactivated else 0},
++ )
++ self._invalidate_cache_and_stream(
++ txn, self.get_user_deactivated_status, (user_id,)
++ )
++ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
++ self._invalidate_cache_and_stream(txn, self.is_guest, (user_id,))
++
++ async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None:
++ """
++ Set whether the user's account is suspended in the `users` table.
++
++ Args:
++ user_id: The user ID of the user in question
++ suspended: True if the user is suspended, false if not
++ """
++ await self.db_pool.runInteraction(
++ "set_user_suspended_status",
++ self.set_user_suspended_status_txn,
++ user_id,
++ suspended,
++ )
++
++ def set_user_suspended_status_txn(
++ self, txn: LoggingTransaction, user_id: str, suspended: bool
++ ) -> None:
++ self.db_pool.simple_update_one_txn(
++ txn=txn,
++ table="users",
++ keyvalues={"name": user_id},
++ updatevalues={"suspended": suspended},
++ )
++ self._invalidate_cache_and_stream(
++ txn, self.get_user_suspended_status, (user_id,)
++ )
++ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
++
++ async def set_user_locked_status(self, user_id: str, locked: bool) -> None:
++ """Set the `locked` property for the provided user to the provided value.
++
++ Args:
++ user_id: The ID of the user to set the status for.
++ locked: The value to set for `locked`.
++ """
++
++ await self.db_pool.runInteraction(
++ "set_user_locked_status",
++ self.set_user_locked_status_txn,
++ user_id,
++ locked,
++ )
++
++ def set_user_locked_status_txn(
++ self, txn: LoggingTransaction, user_id: str, locked: bool
++ ) -> None:
++ self.db_pool.simple_update_one_txn(
++ txn=txn,
++ table="users",
++ keyvalues={"name": user_id},
++ updatevalues={"locked": locked},
++ )
++ self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,))
++ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
++
++ async def update_user_approval_status(
++ self, user_id: UserID, approved: bool
++ ) -> None:
++ """Set the user's 'approved' flag to the given value.
++
++ The boolean will be turned into an int (in update_user_approval_status_txn)
++ because the column is a smallint.
++
++ Args:
++ user_id: the user to update the flag for.
++ approved: the value to set the flag to.
++ """
++ await self.db_pool.runInteraction(
++ "update_user_approval_status",
++ self.update_user_approval_status_txn,
++ user_id.to_string(),
++ approved,
++ )
++
++ def update_user_approval_status_txn(
++ self, txn: LoggingTransaction, user_id: str, approved: bool
++ ) -> None:
++ """Set the user's 'approved' flag to the given value.
++
++ The boolean is turned into an int because the column is a smallint.
++
++ Args:
++ txn: the current database transaction.
++ user_id: the user to update the flag for.
++ approved: the value to set the flag to.
++ """
++ self.db_pool.simple_update_one_txn(
++ txn=txn,
++ table="users",
++ keyvalues={"name": user_id},
++ updatevalues={"approved": approved},
++ )
++
++ # Invalidate the caches of methods that read the value of the 'approved' flag.
++ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
++ self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,))
++
+
+ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
+ def __init__(
+@@ -2217,117 +2347,6 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
+
+ return nb_processed
+
+- async def set_user_deactivated_status(
+- self, user_id: str, deactivated: bool
+- ) -> None:
+- """Set the `deactivated` property for the provided user to the provided value.
+-
+- Args:
+- user_id: The ID of the user to set the status for.
+- deactivated: The value to set for `deactivated`.
+- """
+-
+- await self.db_pool.runInteraction(
+- "set_user_deactivated_status",
+- self.set_user_deactivated_status_txn,
+- user_id,
+- deactivated,
+- )
+-
+- def set_user_deactivated_status_txn(
+- self, txn: LoggingTransaction, user_id: str, deactivated: bool
+- ) -> None:
+- self.db_pool.simple_update_one_txn(
+- txn=txn,
+- table="users",
+- keyvalues={"name": user_id},
+- updatevalues={"deactivated": 1 if deactivated else 0},
+- )
+- self._invalidate_cache_and_stream(
+- txn, self.get_user_deactivated_status, (user_id,)
+- )
+- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
+- txn.call_after(self.is_guest.invalidate, (user_id,))
+-
+- async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None:
+- """
+- Set whether the user's account is suspended in the `users` table.
+-
+- Args:
+- user_id: The user ID of the user in question
+- suspended: True if the user is suspended, false if not
+- """
+- await self.db_pool.runInteraction(
+- "set_user_suspended_status",
+- self.set_user_suspended_status_txn,
+- user_id,
+- suspended,
+- )
+-
+- def set_user_suspended_status_txn(
+- self, txn: LoggingTransaction, user_id: str, suspended: bool
+- ) -> None:
+- self.db_pool.simple_update_one_txn(
+- txn=txn,
+- table="users",
+- keyvalues={"name": user_id},
+- updatevalues={"suspended": suspended},
+- )
+- self._invalidate_cache_and_stream(
+- txn, self.get_user_suspended_status, (user_id,)
+- )
+- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
+-
+- async def set_user_locked_status(self, user_id: str, locked: bool) -> None:
+- """Set the `locked` property for the provided user to the provided value.
+-
+- Args:
+- user_id: The ID of the user to set the status for.
+- locked: The value to set for `locked`.
+- """
+-
+- await self.db_pool.runInteraction(
+- "set_user_locked_status",
+- self.set_user_locked_status_txn,
+- user_id,
+- locked,
+- )
+-
+- def set_user_locked_status_txn(
+- self, txn: LoggingTransaction, user_id: str, locked: bool
+- ) -> None:
+- self.db_pool.simple_update_one_txn(
+- txn=txn,
+- table="users",
+- keyvalues={"name": user_id},
+- updatevalues={"locked": locked},
+- )
+- self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,))
+- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
+-
+- def update_user_approval_status_txn(
+- self, txn: LoggingTransaction, user_id: str, approved: bool
+- ) -> None:
+- """Set the user's 'approved' flag to the given value.
+-
+- The boolean is turned into an int because the column is a smallint.
+-
+- Args:
+- txn: the current database transaction.
+- user_id: the user to update the flag for.
+- approved: the value to set the flag to.
+- """
+- self.db_pool.simple_update_one_txn(
+- txn=txn,
+- table="users",
+- keyvalues={"name": user_id},
+- updatevalues={"approved": approved},
+- )
+-
+- # Invalidate the caches of methods that read the value of the 'approved' flag.
+- self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
+- self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,))
+-
+
+ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
+ def __init__(
+@@ -2956,25 +2975,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
+ start_or_continue_validation_session_txn,
+ )
+
+- async def update_user_approval_status(
+- self, user_id: UserID, approved: bool
+- ) -> None:
+- """Set the user's 'approved' flag to the given value.
+-
+- The boolean will be turned into an int (in update_user_approval_status_txn)
+- because the column is a smallint.
+-
+- Args:
+- user_id: the user to update the flag for.
+- approved: the value to set the flag to.
+- """
+- await self.db_pool.runInteraction(
+- "update_user_approval_status",
+- self.update_user_approval_status_txn,
+- user_id.to_string(),
+- approved,
+- )
+-
+ @wrap_as_background_process("delete_expired_login_tokens")
+ async def _delete_expired_login_tokens(self) -> None:
+ """Remove login tokens with expiry dates that have passed."""
+--
+2.49.0
+
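The hunks above follow a read-only-base / mutating-subclass pattern: the `GET` handling lives in `DevicesGetRestServlet`, which only needs the worker-safe device handler, while `DevicesRestServlet` derives from it and adds the mutating verbs that require the full main-process handler. A minimal, self-contained sketch of that pattern (stand-in classes, not Synapse's actual API):

```
import asyncio


class FakeWorkerHandler:
    """Stand-in for the worker-safe device handler (read-only queries)."""

    async def get_devices_by_user(self, user_id: str) -> list:
        return [{"device_id": "ABCDEFG", "user_id": user_id}]


class FakeMainHandler(FakeWorkerHandler):
    """Stand-in for the full handler; mutations must notify other devices,
    so this only exists on the main process."""

    async def delete_devices(self, user_id: str, device_ids: list) -> None:
        pass


class DevicesGetServlet:
    """Read-only servlet: safe to mount on workers."""

    def __init__(self, handler: FakeWorkerHandler):
        self.device_worker_handler = handler

    async def on_GET(self, user_id: str):
        devices = await self.device_worker_handler.get_devices_by_user(user_id)
        return 200, {"devices": devices, "total": len(devices)}


class DevicesServlet(DevicesGetServlet):
    """Adds mutating verbs: requires the full main-process handler."""

    def __init__(self, handler: FakeMainHandler):
        super().__init__(handler)
        self.device_handler = handler

    async def on_POST(self, user_id: str, body: dict):
        await self.device_handler.delete_devices(user_id, body.get("devices", []))
        return 200, {}


print(asyncio.run(DevicesGetServlet(FakeWorkerHandler()).on_GET("@alice:example.org")))
```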
diff --git a/packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch b/packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch
new file mode 100644
index 0000000..b751f04
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0031-Add-the-ability-to-exclude-remote-users-in-user-dire.patch
@@ -0,0 +1,247 @@
+From fe8bb620de8e5830328c6d23127657560f449af0 Mon Sep 17 00:00:00 2001
+From: Will Lewis <1543626+wrjlewis@users.noreply.github.com>
+Date: Fri, 2 May 2025 15:38:02 +0100
+Subject: [PATCH 31/74] Add the ability to exclude remote users in user
+ directory search results (#18300)
+
+This change adds a new configuration
+`user_directory.exclude_remote_users`, which defaults to False.
+When set to True, remote users will not appear in user directory search
+results.
+
+---------
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18300.feature | 1 +
+ .../configuration/config_documentation.md | 2 +
+ synapse/config/user_directory.py | 3 +
+ synapse/handlers/user_directory.py | 3 +
+ .../storage/databases/main/user_directory.py | 18 ++++--
+ tests/handlers/test_user_directory.py | 61 +++++++++++++++++++
+ 6 files changed, 84 insertions(+), 4 deletions(-)
+ create mode 100644 changelog.d/18300.feature
+
+diff --git a/changelog.d/18300.feature b/changelog.d/18300.feature
+new file mode 100644
+index 0000000000..92bea77556
+--- /dev/null
++++ b/changelog.d/18300.feature
+@@ -0,0 +1 @@
++Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results.
+\ No newline at end of file
+diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
+index 19dc9dd356..5351bef83a 100644
+--- a/docs/usage/configuration/config_documentation.md
++++ b/docs/usage/configuration/config_documentation.md
+@@ -4095,6 +4095,7 @@ This option has the following sub-options:
+ * `prefer_local_users`: Defines whether to prefer local users in search query results.
+ If set to true, local users are more likely to appear above remote users when searching the
+ user directory. Defaults to false.
++* `exclude_remote_users`: If set to true, the search will only return local users. Defaults to false.
+ * `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false.
+
+ Example configuration:
+@@ -4103,6 +4104,7 @@ user_directory:
+ enabled: false
+ search_all_users: true
+ prefer_local_users: true
++ exclude_remote_users: false
+ show_locked_users: true
+ ```
+ ---
+diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
+index c67796906f..fe4e2dc65c 100644
+--- a/synapse/config/user_directory.py
++++ b/synapse/config/user_directory.py
+@@ -38,6 +38,9 @@ class UserDirectoryConfig(Config):
+ self.user_directory_search_all_users = user_directory_config.get(
+ "search_all_users", False
+ )
++ self.user_directory_exclude_remote_users = user_directory_config.get(
++ "exclude_remote_users", False
++ )
+ self.user_directory_search_prefer_local_users = user_directory_config.get(
+ "prefer_local_users", False
+ )
+diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
+index f88d39b38f..33edef5f14 100644
+--- a/synapse/handlers/user_directory.py
++++ b/synapse/handlers/user_directory.py
+@@ -108,6 +108,9 @@ class UserDirectoryHandler(StateDeltasHandler):
+ self.is_mine_id = hs.is_mine_id
+ self.update_user_directory = hs.config.worker.should_update_user_directory
+ self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
++ self.exclude_remote_users = (
++ hs.config.userdirectory.user_directory_exclude_remote_users
++ )
+ self.show_locked_users = hs.config.userdirectory.show_locked_users
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
+ self._hs = hs
+diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
+index d6cd0774a8..391f0dd638 100644
+--- a/synapse/storage/databases/main/user_directory.py
++++ b/synapse/storage/databases/main/user_directory.py
+@@ -1037,11 +1037,11 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
+ }
+ """
+
++ join_args: Tuple[str, ...] = (user_id,)
++
+ if self.hs.config.userdirectory.user_directory_search_all_users:
+- join_args = (user_id,)
+ where_clause = "user_id != ?"
+ else:
+- join_args = (user_id,)
+ where_clause = """
+ (
+ EXISTS (select 1 from users_in_public_rooms WHERE user_id = t.user_id)
+@@ -1055,6 +1055,14 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
+ if not show_locked_users:
+ where_clause += " AND (u.locked IS NULL OR u.locked = FALSE)"
+
++ # Adjust the JOIN type based on the exclude_remote_users flag (the users
++ # table only contains local users so an inner join is a good way to
++ # exclude remote users)
++ if self.hs.config.userdirectory.user_directory_exclude_remote_users:
++ join_type = "JOIN"
++ else:
++ join_type = "LEFT JOIN"
++
+ # We allow manipulating the ranking algorithm by injecting statements
+ # based on config options.
+ additional_ordering_statements = []
+@@ -1086,7 +1094,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
+ SELECT d.user_id AS user_id, display_name, avatar_url
+ FROM matching_users as t
+ INNER JOIN user_directory AS d USING (user_id)
+- LEFT JOIN users AS u ON t.user_id = u.name
++ %(join_type)s users AS u ON t.user_id = u.name
+ WHERE
+ %(where_clause)s
+ ORDER BY
+@@ -1115,6 +1123,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
+ """ % {
+ "where_clause": where_clause,
+ "order_case_statements": " ".join(additional_ordering_statements),
++ "join_type": join_type,
+ }
+ args = (
+ (full_query,)
+@@ -1142,7 +1151,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
+ SELECT d.user_id AS user_id, display_name, avatar_url
+ FROM user_directory_search as t
+ INNER JOIN user_directory AS d USING (user_id)
+- LEFT JOIN users AS u ON t.user_id = u.name
++ %(join_type)s users AS u ON t.user_id = u.name
+ WHERE
+ %(where_clause)s
+ AND value MATCH ?
+@@ -1155,6 +1164,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
+ """ % {
+ "where_clause": where_clause,
+ "order_statements": " ".join(additional_ordering_statements),
++ "join_type": join_type,
+ }
+ args = join_args + (search_query,) + ordering_arguments + (limit + 1,)
+ else:
+diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
+index a75095a79f..a9e9d7d7ea 100644
+--- a/tests/handlers/test_user_directory.py
++++ b/tests/handlers/test_user_directory.py
+@@ -992,6 +992,67 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
+ [self.assertIn(user, local_users) for user in received_user_id_ordering[:3]]
+ [self.assertIn(user, remote_users) for user in received_user_id_ordering[3:]]
+
++ @override_config(
++ {
++ "user_directory": {
++ "enabled": True,
++ "search_all_users": True,
++ "exclude_remote_users": True,
++ }
++ }
++ )
++ def test_exclude_remote_users(self) -> None:
++ """Tests that only local users are returned when
++ user_directory.exclude_remote_users is True.
++ """
++
++ # Create a room and a few users to test the directory with
++ searching_user = self.register_user("searcher", "password")
++ searching_user_tok = self.login("searcher", "password")
++
++ room_id = self.helper.create_room_as(
++ searching_user,
++ room_version=RoomVersions.V1.identifier,
++ tok=searching_user_tok,
++ )
++
++ # Create a few local users and join them to the room
++ local_user_1 = self.register_user("user_xxxxx", "password")
++ local_user_2 = self.register_user("user_bbbbb", "password")
++ local_user_3 = self.register_user("user_zzzzz", "password")
++
++ self._add_user_to_room(room_id, RoomVersions.V1, local_user_1)
++ self._add_user_to_room(room_id, RoomVersions.V1, local_user_2)
++ self._add_user_to_room(room_id, RoomVersions.V1, local_user_3)
++
++ # Create a few "remote" users and join them to the room
++ remote_user_1 = "@user_aaaaa:remote_server"
++ remote_user_2 = "@user_yyyyy:remote_server"
++ remote_user_3 = "@user_ccccc:remote_server"
++ self._add_user_to_room(room_id, RoomVersions.V1, remote_user_1)
++ self._add_user_to_room(room_id, RoomVersions.V1, remote_user_2)
++ self._add_user_to_room(room_id, RoomVersions.V1, remote_user_3)
++
++ local_users = [local_user_1, local_user_2, local_user_3]
++ remote_users = [remote_user_1, remote_user_2, remote_user_3]
++
++ # The local searching user searches for the term "user", which other users have
++ # in their user id
++ results = self.get_success(
++ self.handler.search_users(searching_user, "user", 20)
++ )["results"]
++ received_user_ids = [result["user_id"] for result in results]
++
++ for user in local_users:
++ self.assertIn(
++ user, received_user_ids, f"Local user {user} not found in results"
++ )
++
++ for user in remote_users:
++ self.assertNotIn(
++ user, received_user_ids, f"Remote user {user} should not be in results"
++ )
++
+ def _add_user_to_room(
+ self,
+ room_id: str,
+--
+2.49.0
+
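The JOIN swap is the entire mechanism of the patch above: the `users` table only holds local accounts, so an inner join drops directory rows for remote users while a left join keeps them. A standalone sqlite3 sketch (hypothetical two-column schema, not Synapse's real tables) makes the difference visible:

```
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE user_directory (user_id TEXT, display_name TEXT);
    CREATE TABLE users (name TEXT);  -- local users only
    INSERT INTO user_directory VALUES
        ('@alice:local', 'Alice'),
        ('@bob:remote.example', 'Bob');
    INSERT INTO users VALUES ('@alice:local');
    """
)

for join_type in ("LEFT JOIN", "JOIN"):
    rows = conn.execute(
        f"SELECT d.user_id FROM user_directory AS d "
        f"{join_type} users AS u ON d.user_id = u.name"
    ).fetchall()
    print(join_type, "->", [r[0] for r in rows])

# LEFT JOIN -> ['@alice:local', '@bob:remote.example']
# JOIN -> ['@alice:local']
```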
diff --git a/packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch b/packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch
new file mode 100644
index 0000000..22df408
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0032-Return-specific-error-code-when-email-phone-not-supp.patch
@@ -0,0 +1,118 @@
+From 9f9eb563339079ee5ce082fcd63d0ab5d849b7ed Mon Sep 17 00:00:00 2001
+From: David Baker <dbkr@users.noreply.github.com>
+Date: Mon, 5 May 2025 10:08:50 +0100
+Subject: [PATCH 32/74] Return specific error code when email / phone not
+ supported (#17578)
+
+Implements https://github.com/matrix-org/matrix-spec-proposals/pull/4178
+
+If this needs tests, could you give some idea of what tests would be
+needed and how best to add them?
+
+---
+ changelog.d/17578.misc | 1 +
+ synapse/api/errors.py | 1 +
+ synapse/rest/client/account.py | 6 +++++-
+ synapse/util/msisdn.py | 4 ++--
+ 4 files changed, 9 insertions(+), 3 deletions(-)
+ create mode 100644 changelog.d/17578.misc
+
+diff --git a/changelog.d/17578.misc b/changelog.d/17578.misc
+new file mode 100644
+index 0000000000..7bf69576cd
+--- /dev/null
++++ b/changelog.d/17578.misc
+@@ -0,0 +1 @@
++Return specific error code when adding an email address / phone number to an account is not supported (MSC4178).
+diff --git a/synapse/api/errors.py b/synapse/api/errors.py
+index 5dd6e84289..edd2073384 100644
+--- a/synapse/api/errors.py
++++ b/synapse/api/errors.py
+@@ -70,6 +70,7 @@ class Codes(str, Enum):
+ THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
+ THREEPID_DENIED = "M_THREEPID_DENIED"
+ INVALID_USERNAME = "M_INVALID_USERNAME"
++ THREEPID_MEDIUM_NOT_SUPPORTED = "M_THREEPID_MEDIUM_NOT_SUPPORTED"
+ SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
+ CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
+ CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
+diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
+index 59dbad3582..7d6c0afd9a 100644
+--- a/synapse/rest/client/account.py
++++ b/synapse/rest/client/account.py
+@@ -350,6 +350,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
+ raise SynapseError(
+ 400,
+ "Adding an email to your account is disabled on this server",
++ Codes.THREEPID_MEDIUM_NOT_SUPPORTED,
+ )
+
+ body = parse_and_validate_json_object_from_request(
+@@ -456,6 +457,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
+ raise SynapseError(
+ 400,
+ "Adding phone numbers to user account is not supported by this homeserver",
++ Codes.THREEPID_MEDIUM_NOT_SUPPORTED,
+ )
+
+ ret = await self.identity_handler.requestMsisdnToken(
+@@ -498,7 +500,9 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
+ "Adding emails have been disabled due to lack of an email config"
+ )
+ raise SynapseError(
+- 400, "Adding an email to your account is disabled on this server"
++ 400,
++ "Adding an email to your account is disabled on this server",
++ Codes.THREEPID_MEDIUM_NOT_SUPPORTED,
+ )
+
+ sid = parse_string(request, "sid", required=True)
+diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py
+index b6a784f0bc..dce8da5e18 100644
+--- a/synapse/util/msisdn.py
++++ b/synapse/util/msisdn.py
+@@ -21,7 +21,7 @@
+
+ import phonenumbers
+
+-from synapse.api.errors import SynapseError
++from synapse.api.errors import Codes, SynapseError
+
+
+ def phone_number_to_msisdn(country: str, number: str) -> str:
+@@ -45,7 +45,7 @@ def phone_number_to_msisdn(country: str, number: str) -> str:
+ try:
+ phoneNumber = phonenumbers.parse(number, country)
+ except phonenumbers.NumberParseException:
+- raise SynapseError(400, "Unable to parse phone number")
++ raise SynapseError(400, "Unable to parse phone number", Codes.INVALID_PARAM)
+ return phonenumbers.format_number(phoneNumber, phonenumbers.PhoneNumberFormat.E164)[
+ 1:
+ ]
+--
+2.49.0
+
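With the patch above, clients can key off the new error code instead of parsing the human-readable message. A hedged client-side sketch (the body shape is the standard Matrix error format; the exact `error` string is server-defined):

```
def medium_unsupported(status: int, body: dict) -> bool:
    """True if the server rejected the 3PID because the medium
    (email / phone) is unsupported, per MSC4178."""
    return status == 400 and body.get("errcode") == "M_THREEPID_MEDIUM_NOT_SUPPORTED"


resp_body = {
    "errcode": "M_THREEPID_MEDIUM_NOT_SUPPORTED",
    "error": "Adding an email to your account is disabled on this server",
}
assert medium_unsupported(400, resp_body)
```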
diff --git a/packages/overlays/matrix-synapse/patches/0033-make-tests-tolerant-to-authlib-1.5.2-error-messages-.patch b/packages/overlays/matrix-synapse/patches/0033-make-tests-tolerant-to-authlib-1.5.2-error-messages-.patch
new file mode 100644
index 0000000..7a3e7df
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0033-make-tests-tolerant-to-authlib-1.5.2-error-messages-.patch
@@ -0,0 +1,118 @@
+From c9adbc6a1ce6039b1c04ae3298e463a3e3b25c38 Mon Sep 17 00:00:00 2001
+From: Florian Klink <flokli@flokli.de>
+Date: Mon, 5 May 2025 13:09:39 +0300
+Subject: [PATCH 33/74] make tests tolerant to authlib 1.5.2 error messages
+ (#18390)
+
+authlib 1.5.2 now single-quotes error messages in the claims, causing
+three tests to fail.
+
+Replace the comparison with a regex that accepts both single or double
+quotes.
+
+The tests now pass with both authlib 1.5.1 and 1.5.2.
+
+See https://github.com/NixOS/nixpkgs/pull/402797 for context.
+
+---
+ changelog.d/18390.misc | 1 +
+ tests/rest/client/test_login.py | 20 ++++++++++----------
+ 2 files changed, 11 insertions(+), 10 deletions(-)
+ create mode 100644 changelog.d/18390.misc
+
+diff --git a/changelog.d/18390.misc b/changelog.d/18390.misc
+new file mode 100644
+index 0000000000..e9a08dcfbf
+--- /dev/null
++++ b/changelog.d/18390.misc
+@@ -0,0 +1 @@
++Fixed test failures when using authlib 1.5.2.
+diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
+index d7148917d0..c5c6604667 100644
+--- a/tests/rest/client/test_login.py
++++ b/tests/rest/client/test_login.py
+@@ -1262,18 +1262,18 @@ class JWTTestCase(unittest.HomeserverTestCase):
+ channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
+ self.assertEqual(channel.code, 403, msg=channel.result)
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+- self.assertEqual(
++ self.assertRegex(
+ channel.json_body["error"],
+- 'JWT validation failed: invalid_claim: Invalid claim "iss"',
++ r"^JWT validation failed: invalid_claim: Invalid claim [\"']iss[\"']$",
+ )
+
+ # Not providing an issuer.
+ channel = self.jwt_login({"sub": "kermit"})
+ self.assertEqual(channel.code, 403, msg=channel.result)
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+- self.assertEqual(
++ self.assertRegex(
+ channel.json_body["error"],
+- 'JWT validation failed: missing_claim: Missing "iss" claim',
++ r"^JWT validation failed: missing_claim: Missing [\"']iss[\"'] claim$",
+ )
+
+ def test_login_iss_no_config(self) -> None:
+@@ -1294,18 +1294,18 @@ class JWTTestCase(unittest.HomeserverTestCase):
+ channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
+ self.assertEqual(channel.code, 403, msg=channel.result)
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+- self.assertEqual(
++ self.assertRegex(
+ channel.json_body["error"],
+- 'JWT validation failed: invalid_claim: Invalid claim "aud"',
++ r"^JWT validation failed: invalid_claim: Invalid claim [\"']aud[\"']$",
+ )
+
+ # Not providing an audience.
+ channel = self.jwt_login({"sub": "kermit"})
+ self.assertEqual(channel.code, 403, msg=channel.result)
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+- self.assertEqual(
++ self.assertRegex(
+ channel.json_body["error"],
+- 'JWT validation failed: missing_claim: Missing "aud" claim',
++ r"^JWT validation failed: missing_claim: Missing [\"']aud[\"'] claim$",
+ )
+
+ def test_login_aud_no_config(self) -> None:
+@@ -1313,9 +1313,9 @@ class JWTTestCase(unittest.HomeserverTestCase):
+ channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
+ self.assertEqual(channel.code, 403, msg=channel.result)
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+- self.assertEqual(
++ self.assertRegex(
+ channel.json_body["error"],
+- 'JWT validation failed: invalid_claim: Invalid claim "aud"',
++ r"^JWT validation failed: invalid_claim: Invalid claim [\"']aud[\"']$",
+ )
+
+ def test_login_default_sub(self) -> None:
+--
+2.49.0
+
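The relaxed assertions work because `assertRegex` uses `re.search` and the character class `[\"']` accepts either quote style. A quick standalone check of one of the patterns against both authlib message variants:

```
import re

# Pattern taken from the patch above; matches authlib <=1.5.1 (double
# quotes) and 1.5.2 (single quotes).
pattern = r"^JWT validation failed: invalid_claim: Invalid claim [\"']iss[\"']$"

for msg in (
    'JWT validation failed: invalid_claim: Invalid claim "iss"',
    "JWT validation failed: invalid_claim: Invalid claim 'iss'",
):
    assert re.search(pattern, msg), msg

print("both quoting styles accepted")
```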
diff --git a/packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch b/packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch
new file mode 100644
index 0000000..62f579f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0034-Ensure-the-url-previewer-also-hashes-and-quarantines.patch
@@ -0,0 +1,87 @@
+From d0873d549a8cce720a7842919126d78b4d9d030d Mon Sep 17 00:00:00 2001
+From: Will Hunt <will@half-shot.uk>
+Date: Tue, 6 May 2025 11:04:31 +0100
+Subject: [PATCH 34/74] Ensure the url previewer also hashes and quarantines
+ media (#18297)
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18297.misc | 1 +
+ synapse/media/media_repository.py | 1 -
+ synapse/media/url_previewer.py | 17 ++++++++++++++---
+ 3 files changed, 15 insertions(+), 4 deletions(-)
+ create mode 100644 changelog.d/18297.misc
+
+diff --git a/changelog.d/18297.misc b/changelog.d/18297.misc
+new file mode 100644
+index 0000000000..5032d48174
+--- /dev/null
++++ b/changelog.d/18297.misc
+@@ -0,0 +1 @@
++Apply file hashing and existing quarantines to media downloaded for URL previews.
+diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
+index 859b30e029..18c5a8ecec 100644
+--- a/synapse/media/media_repository.py
++++ b/synapse/media/media_repository.py
+@@ -378,7 +378,6 @@ class MediaRepository:
+ media_length=content_length,
+ user_id=auth_user,
+ sha256=sha256,
+- # TODO: Better name?
+ quarantined_by="system" if should_quarantine else None,
+ )
+
+diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py
+index 2e65a04789..8ef2b3f0c0 100644
+--- a/synapse/media/url_previewer.py
++++ b/synapse/media/url_previewer.py
+@@ -41,7 +41,7 @@ from synapse.api.errors import Codes, SynapseError
+ from synapse.http.client import SimpleHttpClient
+ from synapse.logging.context import make_deferred_yieldable, run_in_background
+ from synapse.media._base import FileInfo, get_filename_from_headers
+-from synapse.media.media_storage import MediaStorage
++from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter
+ from synapse.media.oembed import OEmbedProvider
+ from synapse.media.preview_html import decode_body, parse_html_to_open_graph
+ from synapse.metrics.background_process_metrics import run_as_background_process
+@@ -593,17 +593,26 @@ class UrlPreviewer:
+ file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
+
+ async with self.media_storage.store_into_file(file_info) as (f, fname):
++ sha256writer = SHA256TransparentIOWriter(f)
+ if url.startswith("data:"):
+ if not allow_data_urls:
+ raise SynapseError(
+ 500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN
+ )
+
+- download_result = await self._parse_data_url(url, f)
++ download_result = await self._parse_data_url(url, sha256writer.wrap())
+ else:
+- download_result = await self._download_url(url, f)
++ download_result = await self._download_url(url, sha256writer.wrap())
+
+ try:
++ sha256 = sha256writer.hexdigest()
++ should_quarantine = await self.store.get_is_hash_quarantined(sha256)
++
++ if should_quarantine:
++ logger.warning(
++ "Media has been automatically quarantined as it matched existing quarantined media"
++ )
++
+ time_now_ms = self.clock.time_msec()
+
+ await self.store.store_local_media(
+@@ -614,6 +623,8 @@ class UrlPreviewer:
+ media_length=download_result.length,
+ user_id=user,
+ url_cache=url,
++ sha256=sha256,
++ quarantined_by="system" if should_quarantine else None,
+ )
+
+ except Exception as e:
+--
+2.49.0
+
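`SHA256TransparentIOWriter` hashes bytes as they stream through to the underlying file, so the previewer obtains a digest without re-reading the downloaded media. A minimal standalone analogue (not Synapse's implementation; only the `wrap()`/`hexdigest()` usage is visible in the patch above):

```
import hashlib
import io


class HashingWriter:
    """Wraps a binary file object and hashes everything written through it."""

    def __init__(self, inner: io.BufferedIOBase):
        self._inner = inner
        self._hasher = hashlib.sha256()

    def write(self, data: bytes) -> int:
        self._hasher.update(data)
        return self._inner.write(data)

    def hexdigest(self) -> str:
        return self._hasher.hexdigest()


buf = io.BytesIO()
writer = HashingWriter(buf)
writer.write(b"hello ")
writer.write(b"world")
assert buf.getvalue() == b"hello world"
print(writer.hexdigest())  # digest of the full stream, computed in passing
```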
diff --git a/packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch b/packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch
new file mode 100644
index 0000000..d34a1be
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0035-Convert-Sliding-Sync-tests-to-use-higher-level-compu.patch
@@ -0,0 +1,2816 @@
+From ae877aa101796a0cd57c3637a875140ddb25ed51 Mon Sep 17 00:00:00 2001
+From: Devon Hudson <devon.dmytro@gmail.com>
+Date: Wed, 7 May 2025 15:07:58 +0000
+Subject: [PATCH 35/74] Convert Sliding Sync tests to use higher-level
+ `compute_interested_rooms` (#18399)
+
+Spawning from
+https://github.com/element-hq/synapse/pull/18375#discussion_r2071768635,
+
+This updates some sliding sync tests to use a higher-level function so
+that test coverage spans both the fallback and the new tables. This
+matters once https://github.com/element-hq/synapse/pull/18375 is merged.
+
+In other words, adjust tests to target `compute_interested_rooms(...)`
+(relevant to both the new and fallback paths) instead of the lower level
+`get_room_membership_for_user_at_to_token(...)` that only applies to the
+fallback path.
+
+### Dev notes
+
+```
+SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.handlers.test_sliding_sync.ComputeInterestedRoomsTestCase_new
+```
+
+```
+SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.rest.client.sliding_sync
+```
+
+```
+SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_TEST_LOG_LEVEL=INFO poetry run trial tests.handlers.test_sliding_sync.ComputeInterestedRoomsTestCase_new.test_display_name_changes_leave_after_token_range
+```
+
+---------
+
+Co-authored-by: Eric Eastwood <erice@element.io>
+---
+ changelog.d/18399.misc | 1 +
+ synapse/handlers/sliding_sync/room_lists.py | 122 +-
+ synapse/storage/_base.py | 10 +-
+ synapse/storage/databases/main/cache.py | 23 +-
+ synapse/storage/databases/main/roommember.py | 135 +-
+ synapse/storage/databases/main/stream.py | 2 +
+ tests/handlers/test_sliding_sync.py | 1382 +++++++++++++-----
+ 7 files changed, 1238 insertions(+), 437 deletions(-)
+ create mode 100644 changelog.d/18399.misc
+
+diff --git a/changelog.d/18399.misc b/changelog.d/18399.misc
+new file mode 100644
+index 0000000000..847dc9a2b1
+--- /dev/null
++++ b/changelog.d/18399.misc
+@@ -0,0 +1 @@
++Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both new and fallback logic paths.
+diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py
+index a1730b7e05..7e3cf539df 100644
+--- a/synapse/handlers/sliding_sync/room_lists.py
++++ b/synapse/handlers/sliding_sync/room_lists.py
+@@ -244,14 +244,47 @@ class SlidingSyncRoomLists:
+ # Note: this won't include rooms the user has left themselves. We add back
+ # `newly_left` rooms below. This is more efficient than fetching all rooms and
+ # then filtering out the old left rooms.
+- room_membership_for_user_map = await self.store.get_sliding_sync_rooms_for_user(
+- user_id
++ room_membership_for_user_map = (
++ await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots(
++ user_id
++ )
++ )
++ # To play nice with the rewind logic below, we need to go fetch the rooms the
++ # user has left themselves but only if it changed after the `to_token`.
++ #
++ # If a leave happens *after* the token range, we may have still been joined (or
++ # any non-self-leave which is relevant to sync) to the room before so we need to
++ # include it in the list of potentially relevant rooms and apply our rewind
++ # logic (outside of this function) to see if it's actually relevant.
++ #
++ # We do this separately from
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` as those results
++ # are cached and the `to_token` isn't very cache friendly (people are constantly
++ # requesting with new tokens) so we separate it out here.
++ self_leave_room_membership_for_user_map = (
++ await self.store.get_sliding_sync_self_leave_rooms_after_to_token(
++ user_id, to_token
++ )
+ )
++ if self_leave_room_membership_for_user_map:
++ # FIXME: It would be nice to avoid this copy but since
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
++ # can't return a mutable value like a `dict`. We make the copy to get a
++ # mutable dict that we can change. We try to only make a copy when necessary
++ # (if we actually need to change something) as in most cases, the logic
++ # doesn't need to run.
++ room_membership_for_user_map = dict(room_membership_for_user_map)
++ room_membership_for_user_map.update(self_leave_room_membership_for_user_map)
+
+ # Remove invites from ignored users
+ ignored_users = await self.store.ignored_users(user_id)
+ if ignored_users:
+- # TODO: It would be nice to avoid these copies
++ # FIXME: It would be nice to avoid this copy but since
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
++ # can't return a mutable value like a `dict`. We make the copy to get a
++ # mutable dict that we can change. We try to only make a copy when necessary
++ # (if we actually need to change something) as in most cases, the logic
++ # doesn't need to run.
+ room_membership_for_user_map = dict(room_membership_for_user_map)
+ # Make a copy so we don't run into an error: `dictionary changed size during
+ # iteration`, when we remove items
+@@ -263,11 +296,23 @@ class SlidingSyncRoomLists:
+ ):
+ room_membership_for_user_map.pop(room_id, None)
+
++ (
++ newly_joined_room_ids,
++ newly_left_room_map,
++ ) = await self._get_newly_joined_and_left_rooms(
++ user_id, from_token=from_token, to_token=to_token
++ )
++
+ changes = await self._get_rewind_changes_to_current_membership_to_token(
+ sync_config.user, room_membership_for_user_map, to_token=to_token
+ )
+ if changes:
+- # TODO: It would be nice to avoid these copies
++ # FIXME: It would be nice to avoid this copy but since
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
++ # can't return a mutable value like a `dict`. We make the copy to get a
++ # mutable dict that we can change. We try to only make a copy when necessary
++ # (if we actually need to change something) as in most cases, the logic
++ # doesn't need to run.
+ room_membership_for_user_map = dict(room_membership_for_user_map)
+ for room_id, change in changes.items():
+ if change is None:
+@@ -278,7 +323,7 @@ class SlidingSyncRoomLists:
+ existing_room = room_membership_for_user_map.get(room_id)
+ if existing_room is not None:
+ # Update room membership events to the point in time of the `to_token`
+- room_membership_for_user_map[room_id] = RoomsForUserSlidingSync(
++ room_for_user = RoomsForUserSlidingSync(
+ room_id=room_id,
+ sender=change.sender,
+ membership=change.membership,
+@@ -290,18 +335,18 @@ class SlidingSyncRoomLists:
+ room_type=existing_room.room_type,
+ is_encrypted=existing_room.is_encrypted,
+ )
+-
+- (
+- newly_joined_room_ids,
+- newly_left_room_map,
+- ) = await self._get_newly_joined_and_left_rooms(
+- user_id, from_token=from_token, to_token=to_token
+- )
+- dm_room_ids = await self._get_dm_rooms_for_user(user_id)
++ if filter_membership_for_sync(
++ user_id=user_id,
++ room_membership_for_user=room_for_user,
++ newly_left=room_id in newly_left_room_map,
++ ):
++ room_membership_for_user_map[room_id] = room_for_user
++ else:
++ room_membership_for_user_map.pop(room_id, None)
+
+ # Add back `newly_left` rooms (rooms left in the from -> to token range).
+ #
+- # We do this because `get_sliding_sync_rooms_for_user(...)` doesn't include
++ # We do this because `get_sliding_sync_rooms_for_user_from_membership_snapshots(...)` doesn't include
+ # rooms that the user left themselves as it's more efficient to add them back
+ # here than to fetch all rooms and then filter out the old left rooms. The user
+ # only leaves a room once in a blue moon so this barely needs to run.
+@@ -310,7 +355,12 @@ class SlidingSyncRoomLists:
+ newly_left_room_map.keys() - room_membership_for_user_map.keys()
+ )
+ if missing_newly_left_rooms:
+- # TODO: It would be nice to avoid these copies
++ # FIXME: It would be nice to avoid this copy but since
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
++ # can't return a mutable value like a `dict`. We make the copy to get a
++ # mutable dict that we can change. We try to only make a copy when necessary
++ # (if we actually need to change something) as in most cases, the logic
++ # doesn't need to run.
+ room_membership_for_user_map = dict(room_membership_for_user_map)
+ for room_id in missing_newly_left_rooms:
+ newly_left_room_for_user = newly_left_room_map[room_id]
+@@ -327,14 +377,21 @@ class SlidingSyncRoomLists:
+ # If the membership exists, it's just a normal user left the room on
+ # their own
+ if newly_left_room_for_user_sliding_sync is not None:
+- room_membership_for_user_map[room_id] = (
+- newly_left_room_for_user_sliding_sync
+- )
++ if filter_membership_for_sync(
++ user_id=user_id,
++ room_membership_for_user=newly_left_room_for_user_sliding_sync,
++ newly_left=room_id in newly_left_room_map,
++ ):
++ room_membership_for_user_map[room_id] = (
++ newly_left_room_for_user_sliding_sync
++ )
++ else:
++ room_membership_for_user_map.pop(room_id, None)
+
+ change = changes.get(room_id)
+ if change is not None:
+ # Update room membership events to the point in time of the `to_token`
+- room_membership_for_user_map[room_id] = RoomsForUserSlidingSync(
++ room_for_user = RoomsForUserSlidingSync(
+ room_id=room_id,
+ sender=change.sender,
+ membership=change.membership,
+@@ -346,6 +403,14 @@ class SlidingSyncRoomLists:
+ room_type=newly_left_room_for_user_sliding_sync.room_type,
+ is_encrypted=newly_left_room_for_user_sliding_sync.is_encrypted,
+ )
++ if filter_membership_for_sync(
++ user_id=user_id,
++ room_membership_for_user=room_for_user,
++ newly_left=room_id in newly_left_room_map,
++ ):
++ room_membership_for_user_map[room_id] = room_for_user
++ else:
++ room_membership_for_user_map.pop(room_id, None)
+
+ # If we are `newly_left` from the room but can't find any membership,
+ # then we have been "state reset" out of the room
+@@ -367,7 +432,7 @@ class SlidingSyncRoomLists:
+ newly_left_room_for_user.event_pos.to_room_stream_token(),
+ )
+
+- room_membership_for_user_map[room_id] = RoomsForUserSlidingSync(
++ room_for_user = RoomsForUserSlidingSync(
+ room_id=room_id,
+ sender=newly_left_room_for_user.sender,
+ membership=newly_left_room_for_user.membership,
+@@ -378,6 +443,16 @@ class SlidingSyncRoomLists:
+ room_type=room_type,
+ is_encrypted=is_encrypted,
+ )
++ if filter_membership_for_sync(
++ user_id=user_id,
++ room_membership_for_user=room_for_user,
++ newly_left=room_id in newly_left_room_map,
++ ):
++ room_membership_for_user_map[room_id] = room_for_user
++ else:
++ room_membership_for_user_map.pop(room_id, None)
++
++ dm_room_ids = await self._get_dm_rooms_for_user(user_id)
+
+ if sync_config.lists:
+ sync_room_map = room_membership_for_user_map
+@@ -493,7 +568,12 @@ class SlidingSyncRoomLists:
+
+ if sync_config.room_subscriptions:
+ with start_active_span("assemble_room_subscriptions"):
+- # TODO: It would be nice to avoid these copies
++ # FIXME: It would be nice to avoid this copy but since
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
++ # can't return a mutable value like a `dict`. We make the copy to get a
++ # mutable dict that we can change. We try to only make a copy when necessary
++ # (if we actually need to change something) as in most cases, the logic
++ # doesn't need to run.
+ room_membership_for_user_map = dict(room_membership_for_user_map)
+
+ # Find which rooms are partially stated and may need to be filtered out
+diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
+index 7251e72e3a..b5fe7dd858 100644
+--- a/synapse/storage/_base.py
++++ b/synapse/storage/_base.py
+@@ -130,7 +130,7 @@ class SQLBaseStore(metaclass=ABCMeta):
+ "_get_rooms_for_local_user_where_membership_is_inner", (user_id,)
+ )
+ self._attempt_to_invalidate_cache(
+- "get_sliding_sync_rooms_for_user", (user_id,)
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", (user_id,)
+ )
+
+ # Purge other caches based on room state.
+@@ -138,7 +138,9 @@ class SQLBaseStore(metaclass=ABCMeta):
+ self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
+ self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+ self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
++ self._attempt_to_invalidate_cache(
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
++ )
+
+ def _invalidate_state_caches_all(self, room_id: str) -> None:
+ """Invalidates caches that are based on the current state, but does
+@@ -168,7 +170,9 @@ class SQLBaseStore(metaclass=ABCMeta):
+ self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
+ self._attempt_to_invalidate_cache("get_room_type", (room_id,))
+ self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
++ self._attempt_to_invalidate_cache(
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
++ )
+
+ def _attempt_to_invalidate_cache(
+ self, cache_name: str, key: Optional[Collection[Any]]
+diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
+index f364464c23..9418fb6dd7 100644
+--- a/synapse/storage/databases/main/cache.py
++++ b/synapse/storage/databases/main/cache.py
+@@ -307,7 +307,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+ "get_rooms_for_user", (data.state_key,)
+ )
+ self._attempt_to_invalidate_cache(
+- "get_sliding_sync_rooms_for_user", None
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
+ )
+ self._membership_stream_cache.entity_has_changed(data.state_key, token) # type: ignore[attr-defined]
+ elif data.type == EventTypes.RoomEncryption:
+@@ -319,7 +319,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+
+ if (data.type, data.state_key) in SLIDING_SYNC_RELEVANT_STATE_SET:
+ self._attempt_to_invalidate_cache(
+- "get_sliding_sync_rooms_for_user", None
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
+ )
+ elif row.type == EventsStreamAllStateRow.TypeId:
+ assert isinstance(data, EventsStreamAllStateRow)
+@@ -330,7 +330,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+ self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("get_room_type", (data.room_id,))
+ self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,))
+- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
++ self._attempt_to_invalidate_cache(
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
++ )
+ else:
+ raise Exception("Unknown events stream row type %s" % (row.type,))
+
+@@ -394,7 +396,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+ "_get_rooms_for_local_user_where_membership_is_inner", (state_key,)
+ )
+ self._attempt_to_invalidate_cache(
+- "get_sliding_sync_rooms_for_user", (state_key,)
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots",
++ (state_key,),
+ )
+
+ self._attempt_to_invalidate_cache(
+@@ -413,7 +416,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+ self._attempt_to_invalidate_cache("get_room_encryption", (room_id,))
+
+ if (etype, state_key) in SLIDING_SYNC_RELEVANT_STATE_SET:
+- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
++ self._attempt_to_invalidate_cache(
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
++ )
+
+ if relates_to:
+ self._attempt_to_invalidate_cache(
+@@ -470,7 +475,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+ self._attempt_to_invalidate_cache(
+ "_get_rooms_for_local_user_where_membership_is_inner", None
+ )
+- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
++ self._attempt_to_invalidate_cache(
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
++ )
+ self._attempt_to_invalidate_cache("did_forget", None)
+ self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("get_references_for_event", None)
+@@ -529,7 +536,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
+ self._attempt_to_invalidate_cache(
+ "get_current_hosts_in_room_ordered", (room_id,)
+ )
+- self._attempt_to_invalidate_cache("get_sliding_sync_rooms_for_user", None)
++ self._attempt_to_invalidate_cache(
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots", None
++ )
+ self._attempt_to_invalidate_cache("did_forget", None)
+ self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
+diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
+index b8c78baa6c..2084776543 100644
+--- a/synapse/storage/databases/main/roommember.py
++++ b/synapse/storage/databases/main/roommember.py
+@@ -53,6 +53,7 @@ from synapse.storage.database import (
+ )
+ from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
+ from synapse.storage.databases.main.events_worker import EventsWorkerStore
++from synapse.storage.databases.main.stream import _filter_results_by_stream
+ from synapse.storage.engines import Sqlite3Engine
+ from synapse.storage.roommember import (
+ MemberSummary,
+@@ -65,6 +66,7 @@ from synapse.types import (
+ PersistedEventPosition,
+ StateMap,
+ StrCollection,
++ StreamToken,
+ get_domain_from_id,
+ )
+ from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
+@@ -1389,7 +1391,9 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
+ txn, self.get_forgotten_rooms_for_user, (user_id,)
+ )
+ self._invalidate_cache_and_stream(
+- txn, self.get_sliding_sync_rooms_for_user, (user_id,)
++ txn,
++ self.get_sliding_sync_rooms_for_user_from_membership_snapshots,
++ (user_id,),
+ )
+
+ await self.db_pool.runInteraction("forget_membership", f)
+@@ -1421,25 +1425,30 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
+ )
+
+ @cached(iterable=True, max_entries=10000)
+- async def get_sliding_sync_rooms_for_user(
+- self,
+- user_id: str,
++ async def get_sliding_sync_rooms_for_user_from_membership_snapshots(
++ self, user_id: str
+ ) -> Mapping[str, RoomsForUserSlidingSync]:
+- """Get all the rooms for a user to handle a sliding sync request.
++ """
++ Get all the rooms for a user to handle a sliding sync request from the
++ `sliding_sync_membership_snapshots` table. These will be current memberships and
++ need to be rewound to the token range.
+
+ Ignores forgotten rooms and rooms that the user has left themselves.
+
++ Args:
++ user_id: The user ID to get the rooms for.
++
+ Returns:
+ Map from room ID to membership info
+ """
+
+- def get_sliding_sync_rooms_for_user_txn(
++ def _txn(
+ txn: LoggingTransaction,
+ ) -> Dict[str, RoomsForUserSlidingSync]:
+ # XXX: If you use any new columns that can change (like from
+ # `sliding_sync_joined_rooms` or `forgotten`), make sure to bust the
+- # `get_sliding_sync_rooms_for_user` cache in the appropriate places (and add
+- # tests).
++ # `get_sliding_sync_rooms_for_user_from_membership_snapshots` cache in the
++ # appropriate places (and add tests).
+ sql = """
+ SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
+ r.room_version,
+@@ -1455,6 +1464,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
+ AND (m.membership != 'leave' OR m.user_id != m.sender)
+ """
+ txn.execute(sql, (user_id,))
++
+ return {
+ row[0]: RoomsForUserSlidingSync(
+ room_id=row[0],
+@@ -1475,8 +1485,113 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
+ }
+
+ return await self.db_pool.runInteraction(
+- "get_sliding_sync_rooms_for_user",
+- get_sliding_sync_rooms_for_user_txn,
++ "get_sliding_sync_rooms_for_user_from_membership_snapshots",
++ _txn,
++ )
++
++ async def get_sliding_sync_self_leave_rooms_after_to_token(
++ self,
++ user_id: str,
++ to_token: StreamToken,
++ ) -> Dict[str, RoomsForUserSlidingSync]:
++ """
++ Get all the self-leave rooms for a user after the `to_token` (outside the token
++ range) that are potentially relevant[1] and needed to handle a sliding sync
++ request. The results are from the `sliding_sync_membership_snapshots` table and
++ will be current memberships and need to be rewound to the token range.
++
++ [1] If a leave happens after the token range, we may still have been joined to
++ the room (or had some other non-self-leave membership relevant to sync) before
++ then, so we need to include it in the list of potentially relevant rooms and
++ apply our rewind logic (outside of this function) to see if it's actually
++ relevant.
++
++ This is basically a sister function to
++ `get_sliding_sync_rooms_for_user_from_membership_snapshots`. We could
++ alternatively incorporate this logic into
++ `get_sliding_sync_rooms_for_user_from_membership_snapshots`, but those results
++ are cached and the `to_token` isn't very cache-friendly (people are constantly
++ requesting with new tokens), so we separate it out here.
++
++ Args:
++ user_id: The user ID to get the rooms for.
++ to_token: Any self-leave memberships after this position will be returned.
++
++ Returns:
++ Map from room ID to membership info
++ """
++ # TODO: Potential to check
++ # `self._membership_stream_cache.has_entity_changed(...)` as an early-return
++ # shortcut.
++
++ def _txn(
++ txn: LoggingTransaction,
++ ) -> Dict[str, RoomsForUserSlidingSync]:
++ sql = """
++ SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
++ r.room_version,
++ m.event_instance_name, m.event_stream_ordering,
++ m.has_known_state,
++ m.room_type,
++ m.is_encrypted
++ FROM sliding_sync_membership_snapshots AS m
++ INNER JOIN rooms AS r USING (room_id)
++ WHERE user_id = ?
++ AND m.forgotten = 0
++ AND m.membership = 'leave'
++ AND m.user_id = m.sender
++ AND (m.event_stream_ordering > ?)
++ """
++ # If a leave happens after the token range, we may still have been joined
++ # to the room (or had some other non-self-leave membership relevant to
++ # sync) before then, so we need to include it in the list of potentially
++ # relevant rooms and apply our rewind logic (outside of this function).
++ #
++ # To handle tokens with a non-empty instance_map, we fetch more results
++ # than necessary and then filter them down.
++ min_to_token_position = to_token.room_key.stream
++ txn.execute(sql, (user_id, min_to_token_position))
++
++ # Map from room_id to membership info
++ room_membership_for_user_map: Dict[str, RoomsForUserSlidingSync] = {}
++ for row in txn:
++ room_for_user = RoomsForUserSlidingSync(
++ room_id=row[0],
++ sender=row[1],
++ membership=row[2],
++ event_id=row[3],
++ room_version_id=row[4],
++ event_pos=PersistedEventPosition(row[5], row[6]),
++ has_known_state=bool(row[7]),
++ room_type=row[8],
++ is_encrypted=bool(row[9]),
++ )
++
++ # We filter out unknown room versions proactively. They shouldn't go
++ # down sync and their metadata may be in a broken state (causing
++ # errors).
++ if row[4] not in KNOWN_ROOM_VERSIONS:
++ continue
++
++ # We only want to include the self-leave membership if it happened after
++ # the token range.
++ #
++ # Since the database pulls out more than necessary, we need to filter it
++ # down here.
++ if _filter_results_by_stream(
++ lower_token=None,
++ upper_token=to_token.room_key,
++ instance_name=room_for_user.event_pos.instance_name,
++ stream_ordering=room_for_user.event_pos.stream,
++ ):
++ continue
++
++ room_membership_for_user_map[room_for_user.room_id] = room_for_user
++
++ return room_membership_for_user_map
++
++ return await self.db_pool.runInteraction(
++ "get_sliding_sync_self_leave_rooms_after_to_token",
++ _txn,
+ )
+
+ async def get_sliding_sync_room_for_user(
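Editor's note: the roommember.py changes above split the room-list lookup in two. The query over `sliding_sync_membership_snapshots` is cacheable because it is keyed only on the user, while the self-leaves past the `to_token` live in a separate uncached query, since tokens change on every request and would defeat the cache. A minimal sketch of how a caller might stitch the two together (the `rooms_for_sliding_sync` helper and its call sites are illustrative assumptions, not part of the patch):

```python
# Hypothetical sketch, not part of the patch: combining the cached
# snapshot query with the uncached post-token self-leave query.
async def rooms_for_sliding_sync(store, user_id, to_token):
    # Cacheable: keyed only on user_id, so repeated syncs can hit the cache.
    rooms = dict(
        await store.get_sliding_sync_rooms_for_user_from_membership_snapshots(
            user_id
        )
    )
    # Not cacheable: depends on to_token, which is new on every request.
    rooms.update(
        await store.get_sliding_sync_self_leave_rooms_after_to_token(
            user_id, to_token
        )
    )
    # Both queries return *current* memberships; the caller still has to
    # rewind each entry to the token range before trusting it.
    return rooms
```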
+diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
+index 00e5208674..c52389b8a9 100644
+--- a/synapse/storage/databases/main/stream.py
++++ b/synapse/storage/databases/main/stream.py
+@@ -453,6 +453,8 @@ def _filter_results_by_stream(
+ stream_ordering falls between the two tokens (taking a None
+ token to mean unbounded).
+
++ The token range is defined by > `lower_token` and <= `upper_token`.
++
+ Used to filter results from fetching events in the DB against the given
+ tokens. This is necessary to handle the case where the tokens include
+ position maps, which we handle by fetching more than necessary from the DB
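Editor's note: the one-line doc fix above pins down the interval convention, namely that a result is inside the range when `lower_token < stream_ordering <= upper_token`, with `None` meaning unbounded. A simplified sketch of that check over plain integers (the real function additionally consults each token's per-writer instance map, which is why callers over-fetch and filter):

```python
from typing import Optional

# Simplified sketch of the half-open range documented above:
# inside the range means lower < stream_ordering <= upper,
# where a None bound is treated as unbounded.
def in_stream_range(
    lower: Optional[int], upper: Optional[int], stream_ordering: int
) -> bool:
    if lower is not None and stream_ordering <= lower:
        return False  # exclusive lower bound
    if upper is not None and stream_ordering > upper:
        return False  # inclusive upper bound
    return True

assert in_stream_range(5, 10, 10)       # == upper_token: inside
assert not in_stream_range(5, 10, 5)    # == lower_token: outside
assert in_stream_range(None, 10, -100)  # unbounded below
```

This matches how `get_sliding_sync_self_leave_rooms_after_to_token` uses it above: with `lower_token=None` and `upper_token=to_token.room_key`, a hit means the membership is at or before the token and gets skipped, leaving only self-leaves strictly after `to_token`.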
+diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
+index 5b7e2937f8..cbacf21ae7 100644
+--- a/tests/handlers/test_sliding_sync.py
++++ b/tests/handlers/test_sliding_sync.py
+@@ -22,7 +22,7 @@ from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple
+ from unittest.mock import patch
+
+ import attr
+-from parameterized import parameterized
++from parameterized import parameterized, parameterized_class
+
+ from twisted.test.proto_helpers import MemoryReactor
+
+@@ -43,13 +43,15 @@ from synapse.rest import admin
+ from synapse.rest.client import knock, login, room
+ from synapse.server import HomeServer
+ from synapse.storage.util.id_generators import MultiWriterIdGenerator
+-from synapse.types import JsonDict, StateMap, StreamToken, UserID
+-from synapse.types.handlers.sliding_sync import SlidingSyncConfig
++from synapse.types import JsonDict, StateMap, StreamToken, UserID, create_requester
++from synapse.types.handlers.sliding_sync import PerConnectionState, SlidingSyncConfig
+ from synapse.types.state import StateFilter
+ from synapse.util import Clock
+
+ from tests import unittest
+ from tests.replication._base import BaseMultiWorkerStreamTestCase
++from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
++from tests.test_utils.event_injection import create_event
+ from tests.unittest import HomeserverTestCase, TestCase
+
+ logger = logging.getLogger(__name__)
+@@ -572,9 +574,23 @@ class RoomSyncConfigTestCase(TestCase):
+ self._assert_room_config_equal(combined_config, expected, "A into B")
+
+
+-class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
++# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
++# foreground update for
++# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
++# https://github.com/element-hq/synapse/issues/17623)
++@parameterized_class(
++ ("use_new_tables",),
++ [
++ (True,),
++ (False,),
++ ],
++ class_name_func=lambda cls,
++ num,
++ params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
++)
++class ComputeInterestedRoomsTestCase(SlidingSyncBase):
+ """
+- Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it returns
++ Tests Sliding Sync handler `compute_interested_rooms()` to make sure it returns
+ the correct list of room IDs.
+ """
+
+@@ -596,6 +612,11 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ self.store = self.hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+ self.storage_controllers = hs.get_storage_controllers()
++ persistence = self.hs.get_storage_controllers().persistence
++ assert persistence is not None
++ self.persistence = persistence
++
++ super().prepare(reactor, clock, hs)
+
+ def test_no_rooms(self) -> None:
+ """
+@@ -606,15 +627,28 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ now_token = self.event_sources.get_current_token()
+
+- room_id_results, _, _ = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=now_token,
+ to_token=now_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+
+- self.assertEqual(room_id_results.keys(), set())
++ self.assertIncludes(room_id_results, set(), exact=True)
+
+ def test_get_newly_joined_room(self) -> None:
+ """
+@@ -633,22 +667,44 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_room_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room_token,
+ to_token=after_room_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+- self.assertEqual(room_id_results.keys(), {room_id})
++ self.assertIncludes(
++ room_id_results,
++ {room_id},
++ exact=True,
++ )
+ # It should be pointing to the join event (latest membership event in the
+ # from/to range)
+ self.assertEqual(
+- room_id_results[room_id].event_id,
++ interested_rooms.room_membership_for_user_map[room_id].event_id,
+ join_response["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id].membership,
++ Membership.JOIN,
++ )
+ # We should be considered `newly_joined` because we joined during the token
+ # range
+ self.assertTrue(room_id in newly_joined)
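Editor's note: from here on, every migrated test repeats the same `compute_interested_rooms` invocation verbatim: a single `"foo-list"` with `ranges=[(0, 99)]`, no required state, and `timeline_limit=1`. As a reading aid (and a possible future cleanup), here is a hypothetical helper capturing that repeated shape; it is not part of the patch, and the field values are exactly the ones the tests above pass inline:

```python
from synapse.types import UserID, create_requester
from synapse.types.handlers.sliding_sync import SlidingSyncConfig

def make_sync_config(user_id: str) -> SlidingSyncConfig:
    # Mirrors the request every migrated test builds inline.
    return SlidingSyncConfig(
        user=UserID.from_string(user_id),
        requester=create_requester(user_id=user_id),
        lists={
            "foo-list": SlidingSyncConfig.SlidingSyncList(
                ranges=[(0, 99)],
                required_state=[],
                timeline_limit=1,
            )
        },
        conn_id=None,
    )
```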
+@@ -668,22 +724,40 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_room_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room_token,
+ to_token=after_room_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+- self.assertEqual(room_id_results.keys(), {room_id})
++ self.assertIncludes(room_id_results, {room_id}, exact=True)
+ # It should be pointing to the join event (latest membership event in the
+ # from/to range)
+ self.assertEqual(
+- room_id_results[room_id].event_id,
++ interested_rooms.room_membership_for_user_map[room_id].event_id,
+ join_response["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id].membership,
++ Membership.JOIN,
++ )
+ # We should *NOT* be `newly_joined` because we joined before the token range
+ self.assertTrue(room_id not in newly_joined)
+ self.assertTrue(room_id not in newly_left)
+@@ -742,46 +816,71 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_room_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room_token,
+ to_token=after_room_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Ensure that the invited, ban, and knock rooms show up
+- self.assertEqual(
+- room_id_results.keys(),
++ self.assertIncludes(
++ room_id_results,
+ {
+ invited_room_id,
+ ban_room_id,
+ knock_room_id,
+ },
++ exact=True,
+ )
+ # It should be pointing to the respective membership event (latest
+ # membership event in the from/to range)
+ self.assertEqual(
+- room_id_results[invited_room_id].event_id,
++ interested_rooms.room_membership_for_user_map[invited_room_id].event_id,
+ invite_response["event_id"],
+ )
+- self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[invited_room_id].membership,
++ Membership.INVITE,
++ )
+ self.assertTrue(invited_room_id not in newly_joined)
+ self.assertTrue(invited_room_id not in newly_left)
+
+ self.assertEqual(
+- room_id_results[ban_room_id].event_id,
++ interested_rooms.room_membership_for_user_map[ban_room_id].event_id,
+ ban_response["event_id"],
+ )
+- self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[ban_room_id].membership,
++ Membership.BAN,
++ )
+ self.assertTrue(ban_room_id not in newly_joined)
+ self.assertTrue(ban_room_id not in newly_left)
+
+ self.assertEqual(
+- room_id_results[knock_room_id].event_id,
++ interested_rooms.room_membership_for_user_map[knock_room_id].event_id,
+ knock_room_membership_state_event.event_id,
+ )
+- self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[knock_room_id].membership,
++ Membership.KNOCK,
++ )
+ self.assertTrue(knock_room_id not in newly_joined)
+ self.assertTrue(knock_room_id not in newly_left)
+
+@@ -814,23 +913,43 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_kick_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_kick_token,
+ to_token=after_kick_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # The kicked room should show up
+- self.assertEqual(room_id_results.keys(), {kick_room_id})
++ self.assertIncludes(room_id_results, {kick_room_id}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[kick_room_id].event_id,
++ interested_rooms.room_membership_for_user_map[kick_room_id].event_id,
+ kick_response["event_id"],
+ )
+- self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+- self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[kick_room_id].membership,
++ Membership.LEAVE,
++ )
++ self.assertNotEqual(
++ interested_rooms.room_membership_for_user_map[kick_room_id].sender, user1_id
++ )
+ # We should *NOT* be `newly_joined` because we were not joined at the time
+ # of the `to_token`.
+ self.assertTrue(kick_room_id not in newly_joined)
+@@ -907,16 +1026,29 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room_forgets,
+ to_token=before_room_forgets,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+
+ # We shouldn't see the room because it was forgotten
+- self.assertEqual(room_id_results.keys(), set())
++ self.assertIncludes(room_id_results, set(), exact=True)
+
+ def test_newly_left_rooms(self) -> None:
+ """
+@@ -927,7 +1059,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ # Leave before we calculate the `from_token`
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+- leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
++ _leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+@@ -937,31 +1069,52 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_room2_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room2_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+- self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
+-
+- self.assertEqual(
+- room_id_results[room_id1].event_id,
+- leave_response1["event_id"],
++ # `room_id1` should not show up because it was left before the token range.
++ # `room_id2` should show up because it is `newly_left` within the token range.
++ self.assertIncludes(
++ room_id_results,
++ {room_id2},
++ exact=True,
++ message="Corresponding map to disambiguate the opaque room IDs: "
++ + str(
++ {
++ "room_id1": room_id1,
++ "room_id2": room_id2,
++ }
++ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+- # We should *NOT* be `newly_joined` or `newly_left` because that happened before
+- # the from/to range
+- self.assertTrue(room_id1 not in newly_joined)
+- self.assertTrue(room_id1 not in newly_left)
+
+ self.assertEqual(
+- room_id_results[room_id2].event_id,
++ interested_rooms.room_membership_for_user_map[room_id2].event_id,
+ leave_response2["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id2].membership,
++ Membership.LEAVE,
++ )
+ # We should *NOT* be `newly_joined` because we are instead `newly_left`
+ self.assertTrue(room_id2 not in newly_joined)
+ self.assertTrue(room_id2 in newly_left)
+@@ -987,21 +1140,39 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response1["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -1027,20 +1198,35 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Leave the room after we already have our tokens
+ leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # We should still see the room because we were joined during the
+ # from_token/to_token time period.
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1050,7 +1236,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -1074,19 +1263,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Leave the room after we already have our tokens
+ leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # We should still see the room because we were joined before the `from_token`
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1096,7 +1300,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should *NOT* be `newly_joined` because we joined before the token range
+ self.assertTrue(room_id1 not in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -1138,19 +1345,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ join_response2 = self.helper.join(kick_room_id, user1_id, tok=user1_tok)
+ leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_kick_token,
+ to_token=after_kick_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # The kicked room should show up
+- self.assertEqual(room_id_results.keys(), {kick_room_id})
++ self.assertIncludes(room_id_results, {kick_room_id}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[kick_room_id].event_id,
++ interested_rooms.room_membership_for_user_map[kick_room_id].event_id,
+ kick_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1162,8 +1384,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
+- self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[kick_room_id].membership,
++ Membership.LEAVE,
++ )
++ self.assertNotEqual(
++ interested_rooms.room_membership_for_user_map[kick_room_id].sender, user1_id
++ )
+ # We should *NOT* be `newly_joined` because we were kicked
+ self.assertTrue(kick_room_id not in newly_joined)
+ self.assertTrue(kick_room_id not in newly_left)
+@@ -1194,19 +1421,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should still show up because it's newly_left during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ leave_response1["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1218,7 +1460,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.LEAVE,
++ )
+ # We should *NOT* be `newly_joined` because we are actually `newly_left` during
+ # the token range
+ self.assertTrue(room_id1 not in newly_joined)
+@@ -1249,19 +1494,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Join the room after we already have our tokens
+ join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should still show up because it's newly_left during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ leave_response1["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1272,7 +1532,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.LEAVE,
++ )
+ # We should *NOT* be `newly_joined` because we are actually `newly_left` during
+ # the token range
+ self.assertTrue(room_id1 not in newly_joined)
+@@ -1301,47 +1564,53 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ # Join and leave the room2 before the `to_token`
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+- leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
++ _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join the room2 after we already have our tokens
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=None,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Only rooms we were still joined to at the `to_token` should show up
+- self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+
+ # Room1
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response1["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
+- # We should *NOT* be `newly_joined`/`newly_left` because there is no
+- # `from_token` to define a "live" range to compare against
+- self.assertTrue(room_id1 not in newly_joined)
+- self.assertTrue(room_id1 not in newly_left)
+-
+- # Room2
+- # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id2].event_id,
+- leave_response2["event_id"],
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
+ )
+- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+ # We should *NOT* be `newly_joined`/`newly_left` because there is no
+ # `from_token` to define a "live" range to compare against
+- self.assertTrue(room_id2 not in newly_joined)
+- self.assertTrue(room_id2 not in newly_left)
++ self.assertTrue(room_id1 not in newly_joined)
++ self.assertTrue(room_id1 not in newly_left)
+
+ def test_from_token_ahead_of_to_token(self) -> None:
+ """
+@@ -1365,7 +1634,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ # Join and leave the room2 before `to_token`
+ _join_room2_response1 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+- leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
++ _leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+
+ # Note: These are purposely swapped. The `from_token` should come after
+ # the `to_token` in this test
+@@ -1390,55 +1659,70 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Join the room4 after we already have our tokens
+ self.helper.join(room_id4, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=from_token,
+ to_token=to_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # In the "current" state snapshot, we're joined to all of the rooms but in the
+ # from/to token range...
+ self.assertIncludes(
+- room_id_results.keys(),
++ room_id_results,
+ {
+ # Included because we were joined before both tokens
+ room_id1,
+- # Included because we had membership before the to_token
+- room_id2,
++ # Excluded because we left before the `from_token` and `to_token`
++ # room_id2,
+ # Excluded because we joined after the `to_token`
+ # room_id3,
+ # Excluded because we joined after the `to_token`
+ # room_id4,
+ },
+ exact=True,
++ message="Corresponding map to disambiguate the opaque room IDs: "
++ + str(
++ {
++ "room_id1": room_id1,
++ "room_id2": room_id2,
++ "room_id3": room_id3,
++ "room_id4": room_id4,
++ }
++ ),
+ )
+
+ # Room1
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_room1_response1["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1`
+ # before either of the tokens
+ self.assertTrue(room_id1 not in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+
+- # Room2
+- # It should be pointing to the latest membership event in the from/to range
+- self.assertEqual(
+- room_id_results[room_id2].event_id,
+- leave_room2_response1["event_id"],
+- )
+- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+- # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+- # `room1` before either of the tokens
+- self.assertTrue(room_id2 not in newly_joined)
+- self.assertTrue(room_id2 not in newly_left)
+-
+ def test_leave_before_range_and_join_leave_after_to_token(self) -> None:
+ """
+ Test old left rooms. But we're also testing that joining and leaving after the
+@@ -1455,7 +1739,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join and leave the room before the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+- leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
++ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+@@ -1463,25 +1747,28 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+
+- self.assertEqual(room_id_results.keys(), {room_id1})
+- # It should be pointing to the latest membership event in the from/to range
+- self.assertEqual(
+- room_id_results[room_id1].event_id,
+- leave_response["event_id"],
+- )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+- # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+- # `room1` before either of the tokens
+- self.assertTrue(room_id1 not in newly_joined)
+- self.assertTrue(room_id1 not in newly_left)
++ self.assertIncludes(room_id_results, set(), exact=True)
+
+ def test_leave_before_range_and_join_after_to_token(self) -> None:
+ """
+@@ -1499,32 +1786,35 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # Join and leave the room before the from/to range
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+- leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
++ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+ after_room1_token = self.event_sources.get_current_token()
+
+ # Join the room after we already have our tokens
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+
+- self.assertEqual(room_id_results.keys(), {room_id1})
+- # It should be pointing to the latest membership event in the from/to range
+- self.assertEqual(
+- room_id_results[room_id1].event_id,
+- leave_response["event_id"],
+- )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+- # We should *NOT* be `newly_joined`/`newly_left` because we joined and left
+- # `room1` before either of the tokens
+- self.assertTrue(room_id1 not in newly_joined)
+- self.assertTrue(room_id1 not in newly_left)
++ self.assertIncludes(room_id_results, set(), exact=True)
+
+ def test_join_leave_multiple_times_during_range_and_after_to_token(
+ self,
+@@ -1556,19 +1846,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because it was newly_left and joined during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response2["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1582,7 +1887,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ # We should *NOT* be `newly_left` because we joined during the token range and
+@@ -1618,19 +1926,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were joined before the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response2["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1644,7 +1967,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should *NOT* be `newly_joined` because we joined before the token range
+ self.assertTrue(room_id1 not in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -1677,19 +2003,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ join_respsonse = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were invited before the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ invite_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1700,7 +2041,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.INVITE,
++ )
+ # We should *NOT* be `newly_joined` because we were only invited before the
+ # token range
+ self.assertTrue(room_id1 not in newly_joined)
+@@ -1751,19 +2095,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ tok=user1_tok,
+ )
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were joined during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ displayname_change_during_token_range_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1778,7 +2137,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -1816,19 +2178,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_change1_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_change1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were joined during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ displayname_change_during_token_range_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1840,7 +2217,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should *NOT* be `newly_joined` because we joined before the token range
+ self.assertTrue(room_id1 not in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -1888,19 +2268,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ tok=user1_tok,
+ )
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were joined before the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ displayname_change_before_token_range_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1915,18 +2310,22 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should *NOT* be `newly_joined` because we joined before the token range
+ self.assertTrue(room_id1 not in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+
+- def test_display_name_changes_leave_after_token_range(
++ def test_newly_joined_display_name_changes_leave_after_token_range(
+ self,
+ ) -> None:
+ """
+ Test that we point to the correct membership event within the from/to range even
+- if there are multiple `join` membership events in a row indicating
+- `displayname`/`avatar_url` updates and we leave after the `to_token`.
++ if we are `newly_joined` and there are multiple `join` membership events in a
++ row indicating `displayname`/`avatar_url` updates and we leave after the
++ `to_token`.
+
+ See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method.
+ """
+@@ -1941,6 +2340,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # leave and can still re-join.
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
++
+ # Update the displayname during the token range
+ displayname_change_during_token_range_response = self.helper.send_state(
+ room_id1,
+@@ -1970,19 +2370,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Leave after the token
+ self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were joined during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ displayname_change_during_token_range_response["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -1997,11 +2412,118 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+
++ def test_display_name_changes_leave_after_token_range(
++ self,
++ ) -> None:
++ """
++ Test that we point to the correct membership event within the from/to range even
++ if there are multiple `join` membership events in a row indicating
++ `displayname`/`avatar_url` updates and we leave after the `to_token`.
++
++ See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method.
++ """
++ user1_id = self.register_user("user1", "pass")
++ user1_tok = self.login(user1_id, "pass")
++ user2_id = self.register_user("user2", "pass")
++ user2_tok = self.login(user2_id, "pass")
++
++ _before_room1_token = self.event_sources.get_current_token()
++
++ # We create the room with user2 so the room isn't left with no members when we
++ # leave and can still re-join.
++ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
++ join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
++
++ after_join_token = self.event_sources.get_current_token()
++
++ # Update the displayname during the token range
++ displayname_change_during_token_range_response = self.helper.send_state(
++ room_id1,
++ event_type=EventTypes.Member,
++ state_key=user1_id,
++ body={
++ "membership": Membership.JOIN,
++ "displayname": "displayname during token range",
++ },
++ tok=user1_tok,
++ )
++
++ after_display_name_change_token = self.event_sources.get_current_token()
++
++ # Update the displayname after the token range
++ displayname_change_after_token_range_response = self.helper.send_state(
++ room_id1,
++ event_type=EventTypes.Member,
++ state_key=user1_id,
++ body={
++ "membership": Membership.JOIN,
++ "displayname": "displayname after token range",
++ },
++ tok=user1_tok,
++ )
++
++ # Leave after the token
++ self.helper.leave(room_id1, user1_id, tok=user1_tok)
++
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
++ from_token=after_join_token,
++ to_token=after_display_name_change_token,
++ )
++ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
++
++ # Room should show up because we were joined during the from/to range
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
++ # It should be pointing to the latest membership event in the from/to range
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
++ displayname_change_during_token_range_response["event_id"],
++ "Corresponding map to disambiguate the opaque event IDs: "
++ + str(
++ {
++ "join_response": join_response["event_id"],
++ "displayname_change_during_token_range_response": displayname_change_during_token_range_response[
++ "event_id"
++ ],
++ "displayname_change_after_token_range_response": displayname_change_after_token_range_response[
++ "event_id"
++ ],
++ }
++ ),
++ )
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
++ # We only changed our display name during the token range so we shouldn't be
++ # considered `newly_joined` or `newly_left`
++ self.assertTrue(room_id1 not in newly_joined)
++ self.assertTrue(room_id1 not in newly_left)
++
+ def test_display_name_changes_join_after_token_range(
+ self,
+ ) -> None:
+@@ -2038,16 +2560,29 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ tok=user1_tok,
+ )
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+
+ # Room shouldn't show up because we joined after the from/to range
+- self.assertEqual(room_id_results.keys(), set())
++ self.assertIncludes(room_id_results, set(), exact=True)
+
+ def test_newly_joined_with_leave_join_in_token_range(
+ self,
+@@ -2074,22 +2609,40 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_more_changes_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=after_room1_token,
+ to_token=after_more_changes_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because we were joined during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_response2["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be considered `newly_joined` because there is some non-join event in
+ # between our latest join event.
+ self.assertTrue(room_id1 in newly_joined)
+@@ -2139,19 +2692,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ after_room1_token = self.event_sources.get_current_token()
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room1_token,
+ to_token=after_room1_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room should show up because it was newly_left and joined during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1})
++ self.assertIncludes(room_id_results, {room_id1}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ displayname_change_during_token_range_response2["event_id"],
+ "Corresponding map to disambiguate the opaque event IDs: "
+ + str(
+@@ -2166,7 +2734,10 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ }
+ ),
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we first joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+@@ -2192,7 +2763,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ # Invited and left the room before the token
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+- leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
++ _leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ # Invited to room2
+ invite_room2_response = self.helper.invite(
+ room_id2, src=user2_id, targ=user1_id, tok=user2_tok
+@@ -2215,45 +2786,52 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Leave room3
+ self.helper.leave(room_id3, user1_id, tok=user1_tok)
+
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_room3_token,
+ to_token=after_room3_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+- self.assertEqual(
+- room_id_results.keys(),
++ self.assertIncludes(
++ room_id_results,
+ {
+- # Left before the from/to range
+- room_id1,
++ # Excluded because we left before the from/to range
++ # room_id1,
+ # Invited before the from/to range
+ room_id2,
+ # `newly_left` during the from/to range
+ room_id3,
+ },
++ exact=True,
+ )
+
+- # Room1
+- # It should be pointing to the latest membership event in the from/to range
+- self.assertEqual(
+- room_id_results[room_id1].event_id,
+- leave_room1_response["event_id"],
+- )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
+- # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left
+- # before the token range
+- self.assertTrue(room_id1 not in newly_joined)
+- self.assertTrue(room_id1 not in newly_left)
+-
+ # Room2
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id2].event_id,
++ interested_rooms.room_membership_for_user_map[room_id2].event_id,
+ invite_room2_response["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id2].membership,
++ Membership.INVITE,
++ )
+ # We should *NOT* be `newly_joined`/`newly_left` because we were invited before
+ # the token range
+ self.assertTrue(room_id2 not in newly_joined)
+@@ -2262,10 +2840,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ # Room3
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id3].event_id,
++ interested_rooms.room_membership_for_user_map[room_id3].event_id,
+ leave_room3_response["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id3].membership,
++ Membership.LEAVE,
++ )
+ # We should be `newly_left` because we were invited and left during
+ # the token range
+ self.assertTrue(room_id3 not in newly_joined)
+@@ -2282,7 +2863,16 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+ user2_tok = self.login(user2_id, "pass")
+
+ # The room where the state reset will happen
+- room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
++ room_id1 = self.helper.create_room_as(
++ user2_id,
++ is_public=True,
++ tok=user2_tok,
++ )
++ # Create a dummy event for us to point back to for the state reset
++ dummy_event_response = self.helper.send(room_id1, "test", tok=user2_tok)
++ dummy_event_id = dummy_event_response["event_id"]
++
++ # Join after the dummy event
+ join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Join another room so we don't hit the short-circuit and return early if they
+@@ -2292,92 +2882,97 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
+
+ before_reset_token = self.event_sources.get_current_token()
+
+- # Send another state event to make a position for the state reset to happen at
+- dummy_state_response = self.helper.send_state(
+- room_id1,
+- event_type="foobarbaz",
+- state_key="",
+- body={"foo": "bar"},
+- tok=user2_tok,
+- )
+- dummy_state_pos = self.get_success(
+- self.store.get_position_for_event(dummy_state_response["event_id"])
+- )
+-
+- # Mock a state reset removing the membership for user1 in the current state
+- self.get_success(
+- self.store.db_pool.simple_delete(
+- table="current_state_events",
+- keyvalues={
+- "room_id": room_id1,
+- "type": EventTypes.Member,
+- "state_key": user1_id,
+- },
+- desc="state reset user in current_state_events",
++ # Trigger a state reset
++ join_rule_event, join_rule_context = self.get_success(
++ create_event(
++ self.hs,
++ prev_event_ids=[dummy_event_id],
++ type=EventTypes.JoinRules,
++ state_key="",
++ content={"join_rule": JoinRules.INVITE},
++ sender=user2_id,
++ room_id=room_id1,
++ room_version=self.get_success(self.store.get_room_version_id(room_id1)),
+ )
+ )
+- self.get_success(
+- self.store.db_pool.simple_delete(
+- table="local_current_membership",
+- keyvalues={
+- "room_id": room_id1,
+- "user_id": user1_id,
+- },
+- desc="state reset user in local_current_membership",
+- )
+- )
+- self.get_success(
+- self.store.db_pool.simple_insert(
+- table="current_state_delta_stream",
+- values={
+- "stream_id": dummy_state_pos.stream,
+- "room_id": room_id1,
+- "type": EventTypes.Member,
+- "state_key": user1_id,
+- "event_id": None,
+- "prev_event_id": join_response1["event_id"],
+- "instance_name": dummy_state_pos.instance_name,
+- },
+- desc="state reset user in current_state_delta_stream",
+- )
++ _, join_rule_event_pos, _ = self.get_success(
++ self.persistence.persist_event(join_rule_event, join_rule_context)
+ )
+
+- # Manually bust the cache since we we're just manually messing with the database
+- # and not causing an actual state reset.
+- self.store._membership_stream_cache.entity_has_changed(
+- user1_id, dummy_state_pos.stream
+- )
++ # Ensure that the state reset worked and only user2 is in the room now
++ users_in_room = self.get_success(self.store.get_users_in_room(room_id1))
++ self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+ after_reset_token = self.event_sources.get_current_token()
+
+ # The function under test
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_reset_token,
+ to_token=after_reset_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+ # Room1 should show up because it was `newly_left` via state reset during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
++ self.assertIncludes(room_id_results, {room_id1, room_id2}, exact=True)
+ # It should be pointing to no event because we were removed from the room
+ # without a corresponding leave event
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ None,
++ "Corresponding map to disambiguate the opaque event IDs: "
++ + str(
++ {
++ "join_response1": join_response1["event_id"],
++ }
++ ),
+ )
+ # State reset caused us to leave the room and there is no corresponding leave event
+- self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.LEAVE,
++ )
+ # We should *NOT* be `newly_joined` because we joined before the token range
+ self.assertTrue(room_id1 not in newly_joined)
+ # We should be `newly_left` because we were removed via state reset during the from/to range
+ self.assertTrue(room_id1 in newly_left)
+
+
+-class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase):
++# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
++# foreground update for
++# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
++# https://github.com/element-hq/synapse/issues/17623)
++@parameterized_class(
++ ("use_new_tables",),
++ [
++ (True,),
++ (False,),
++ ],
++ class_name_func=lambda cls,
++ num,
++ params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
++)
++class ComputeInterestedRoomsShardTestCase(
++ BaseMultiWorkerStreamTestCase, SlidingSyncBase
++):
+ """
+- Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it works with
++ Tests Sliding Sync handler `compute_interested_rooms()` to make sure it works with
+ sharded event stream_writers enabled
+ """
+
+@@ -2475,7 +3070,7 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa
+ join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+ # Leave room2
+- leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok)
++ _leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok)
+ join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok)
+ # Leave room3
+ self.helper.leave(room_id3, user1_id, tok=user1_tok)
+@@ -2565,57 +3160,74 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa
+ self.get_success(actx.__aexit__(None, None, None))
+
+ # The function under test
+- room_id_results, newly_joined, newly_left = self.get_success(
+- self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
+- UserID.from_string(user1_id),
++ interested_rooms = self.get_success(
++ self.sliding_sync_handler.room_lists.compute_interested_rooms(
++ SlidingSyncConfig(
++ user=UserID.from_string(user1_id),
++ requester=create_requester(user_id=user1_id),
++ lists={
++ "foo-list": SlidingSyncConfig.SlidingSyncList(
++ ranges=[(0, 99)],
++ required_state=[],
++ timeline_limit=1,
++ )
++ },
++ conn_id=None,
++ ),
++ PerConnectionState(),
+ from_token=before_stuck_activity_token,
+ to_token=stuck_activity_token,
+ )
+ )
++ room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
++ newly_joined = interested_rooms.newly_joined_rooms
++ newly_left = interested_rooms.newly_left_rooms
+
+- self.assertEqual(
+- room_id_results.keys(),
++ self.assertIncludes(
++ room_id_results,
+ {
+ room_id1,
+- room_id2,
++ # Excluded because we left before the from/to range and the second join
++ # event happened while worker2 was stuck and technically occurs after
++ # the `stuck_activity_token`.
++ # room_id2,
+ room_id3,
+ },
++ exact=True,
++ message="Corresponding map to disambiguate the opaque room IDs: "
++ + str(
++ {
++ "room_id1": room_id1,
++ "room_id2": room_id2,
++ "room_id3": room_id3,
++ }
++ ),
+ )
+
+ # Room1
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id1].event_id,
++ interested_rooms.room_membership_for_user_map[room_id1].event_id,
+ join_room1_response["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id1].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id1 in newly_joined)
+ self.assertTrue(room_id1 not in newly_left)
+
+- # Room2
+- # It should be pointing to the latest membership event in the from/to range
+- self.assertEqual(
+- room_id_results[room_id2].event_id,
+- leave_room2_response["event_id"],
+- )
+- self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE)
+- # room_id2 should *NOT* be considered `newly_left` because we left before the
+- # from/to range and the join event during the range happened while worker2 was
+- # stuck. This means that from the perspective of the master, where the
+- # `stuck_activity_token` is generated, the stream position for worker2 wasn't
+- # advanced to the join yet. Looking at the `instance_map`, the join technically
+- # comes after `stuck_activity_token`.
+- self.assertTrue(room_id2 not in newly_joined)
+- self.assertTrue(room_id2 not in newly_left)
+-
+ # Room3
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+- room_id_results[room_id3].event_id,
++ interested_rooms.room_membership_for_user_map[room_id3].event_id,
+ join_on_worker3_response["event_id"],
+ )
+- self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN)
++ self.assertEqual(
++ interested_rooms.room_membership_for_user_map[room_id3].membership,
++ Membership.JOIN,
++ )
+ # We should be `newly_joined` because we joined during the token range
+ self.assertTrue(room_id3 in newly_joined)
+ self.assertTrue(room_id3 not in newly_left)
+@@ -2645,6 +3257,9 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ self.store = self.hs.get_datastores().main
+ self.event_sources = hs.get_event_sources()
+ self.storage_controllers = hs.get_storage_controllers()
++ persistence = self.hs.get_storage_controllers().persistence
++ assert persistence is not None
++ self.persistence = persistence
+
+ def _get_sync_room_ids_for_user(
+ self,
+@@ -2687,7 +3302,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ to_token=now_token,
+ )
+
+- self.assertEqual(room_id_results.keys(), set())
++ self.assertIncludes(room_id_results.keys(), set(), exact=True)
+
+ def test_basic_rooms(self) -> None:
+ """
+@@ -2753,7 +3368,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ )
+
+ # Ensure that the invited, ban, and knock rooms show up
+- self.assertEqual(
++ self.assertIncludes(
+ room_id_results.keys(),
+ {
+ join_room_id,
+@@ -2761,6 +3376,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ ban_room_id,
+ knock_room_id,
+ },
++ exact=True,
+ )
+ # It should be pointing to the respective membership event (latest
+ # membership event in the from/to range)
+@@ -2824,7 +3440,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ )
+
+ # Only the `newly_left` room should show up
+- self.assertEqual(room_id_results.keys(), {room_id2})
++ self.assertIncludes(room_id_results.keys(), {room_id2}, exact=True)
+ self.assertEqual(
+ room_id_results[room_id2].event_id,
+ _leave_response2["event_id"],
+@@ -2869,7 +3485,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ )
+
+ # The kicked room should show up
+- self.assertEqual(room_id_results.keys(), {kick_room_id})
++ self.assertIncludes(room_id_results.keys(), {kick_room_id}, exact=True)
+ # It should be pointing to the latest membership event in the from/to range
+ self.assertEqual(
+ room_id_results[kick_room_id].event_id,
+@@ -2893,8 +3509,17 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ user2_tok = self.login(user2_id, "pass")
+
+ # The room where the state reset will happen
+- room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+- join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
++ room_id1 = self.helper.create_room_as(
++ user2_id,
++ is_public=True,
++ tok=user2_tok,
++ )
++ # Create a dummy event for us to point back to for the state reset
++ dummy_event_response = self.helper.send(room_id1, "test", tok=user2_tok)
++ dummy_event_id = dummy_event_response["event_id"]
++
++ # Join after the dummy event
++ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Join another room so we don't hit the short-circuit and return early if they
+ # have no room membership
+@@ -2903,61 +3528,26 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+
+ before_reset_token = self.event_sources.get_current_token()
+
+- # Send another state event to make a position for the state reset to happen at
+- dummy_state_response = self.helper.send_state(
+- room_id1,
+- event_type="foobarbaz",
+- state_key="",
+- body={"foo": "bar"},
+- tok=user2_tok,
+- )
+- dummy_state_pos = self.get_success(
+- self.store.get_position_for_event(dummy_state_response["event_id"])
+- )
+-
+- # Mock a state reset removing the membership for user1 in the current state
+- self.get_success(
+- self.store.db_pool.simple_delete(
+- table="current_state_events",
+- keyvalues={
+- "room_id": room_id1,
+- "type": EventTypes.Member,
+- "state_key": user1_id,
+- },
+- desc="state reset user in current_state_events",
+- )
+- )
+- self.get_success(
+- self.store.db_pool.simple_delete(
+- table="local_current_membership",
+- keyvalues={
+- "room_id": room_id1,
+- "user_id": user1_id,
+- },
+- desc="state reset user in local_current_membership",
++ # Trigger a state reset
++ join_rule_event, join_rule_context = self.get_success(
++ create_event(
++ self.hs,
++ prev_event_ids=[dummy_event_id],
++ type=EventTypes.JoinRules,
++ state_key="",
++ content={"join_rule": JoinRules.INVITE},
++ sender=user2_id,
++ room_id=room_id1,
++ room_version=self.get_success(self.store.get_room_version_id(room_id1)),
+ )
+ )
+- self.get_success(
+- self.store.db_pool.simple_insert(
+- table="current_state_delta_stream",
+- values={
+- "stream_id": dummy_state_pos.stream,
+- "room_id": room_id1,
+- "type": EventTypes.Member,
+- "state_key": user1_id,
+- "event_id": None,
+- "prev_event_id": join_response1["event_id"],
+- "instance_name": dummy_state_pos.instance_name,
+- },
+- desc="state reset user in current_state_delta_stream",
+- )
++ _, join_rule_event_pos, _ = self.get_success(
++ self.persistence.persist_event(join_rule_event, join_rule_context)
+ )
+
+- # Manually bust the cache since we we're just manually messing with the database
+- # and not causing an actual state reset.
+- self.store._membership_stream_cache.entity_has_changed(
+- user1_id, dummy_state_pos.stream
+- )
++ # Ensure that the state reset worked and only user2 is in the room now
++ users_in_room = self.get_success(self.store.get_users_in_room(room_id1))
++ self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+ after_reset_token = self.event_sources.get_current_token()
+
+@@ -2969,7 +3559,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase):
+ )
+
+ # Room1 should show up because it was `newly_left` via state reset during the from/to range
+- self.assertEqual(room_id_results.keys(), {room_id1, room_id2})
++ self.assertIncludes(room_id_results.keys(), {room_id1, room_id2}, exact=True)
+ # It should be pointing to no event because we were removed from the room
+ # without a corresponding leave event
+ self.assertEqual(
+--
+2.49.0
+
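Every migrated call site in the patch above repeats the same ~20-line `compute_interested_rooms(...)` invocation with an identical single-list `SlidingSyncConfig`. A minimal sketch of a test helper that would factor that out — the helper name and the `synapse.types.handlers.sliding_sync` import path are assumptions, not something this patch adds:

    # Hypothetical mixin method for these HomeserverTestCase subclasses: wraps the
    # repeated compute_interested_rooms(...) call with the single "foo-list" config
    # used throughout the tests and unpacks the pieces the assertions need.
    from synapse.types import UserID, create_requester
    from synapse.types.handlers.sliding_sync import (  # assumed import path
        PerConnectionState,
        SlidingSyncConfig,
    )

    def _compute_interested_rooms_as_user(self, user_id, from_token, to_token):
        interested_rooms = self.get_success(
            self.sliding_sync_handler.room_lists.compute_interested_rooms(
                SlidingSyncConfig(
                    user=UserID.from_string(user_id),
                    requester=create_requester(user_id=user_id),
                    lists={
                        "foo-list": SlidingSyncConfig.SlidingSyncList(
                            ranges=[(0, 99)],
                            required_state=[],
                            timeline_limit=1,
                        )
                    },
                    conn_id=None,
                ),
                PerConnectionState(),
                from_token=from_token,
                to_token=to_token,
            )
        )
        room_ids = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
        return (
            room_ids,
            interested_rooms.newly_joined_rooms,
            interested_rooms.newly_left_rooms,
            interested_rooms.room_membership_for_user_map,
        )

With such a helper each test body collapses to one unpacking line, so future API churn like this patch becomes a one-place change.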
diff --git a/packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch b/packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch
new file mode 100644
index 0000000..ffb0912
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0036-Pass-leave-from-remote-invite-rejection-down-Sliding.patch
@@ -0,0 +1,537 @@
+From 7c633f1a58e22ea27a172efdc52d94bfdac8c728 Mon Sep 17 00:00:00 2001
+From: Devon Hudson <devon.dmytro@gmail.com>
+Date: Thu, 8 May 2025 14:28:23 +0000
+Subject: [PATCH 36/74] Pass leave from remote invite rejection down Sliding
+ Sync (#18375)
+
+Fixes #17753
+
+
+### Dev notes
+
+The `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms`
+database tables were added in
+https://github.com/element-hq/synapse/pull/17512
+
+---------
+
+Co-authored-by: Erik Johnston <erik@matrix.org>
+Co-authored-by: Olivier 'reivilibre <oliverw@matrix.org>
+Co-authored-by: Eric Eastwood <erice@element.io>
+---
+ changelog.d/18375.bugfix | 1 +
+ synapse/handlers/sliding_sync/__init__.py | 23 ++
+ synapse/handlers/sliding_sync/room_lists.py | 49 ++++-
+ synapse/storage/databases/main/stream.py | 202 ++++++++++++++++++
+ .../92/03_ss_membership_snapshot_idx.sql | 16 ++
+ tests/handlers/test_sliding_sync.py | 12 ++
+ .../client/sliding_sync/test_sliding_sync.py | 58 +++++
+ 7 files changed, 360 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18375.bugfix
+ create mode 100644 synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql
+
+diff --git a/changelog.d/18375.bugfix b/changelog.d/18375.bugfix
+new file mode 100644
+index 0000000000..faebe6f046
+--- /dev/null
++++ b/changelog.d/18375.bugfix
+@@ -0,0 +1 @@
++Pass leave from remote invite rejection down Sliding Sync.
+diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
+index 459d3c3e24..cb56eb53fc 100644
+--- a/synapse/handlers/sliding_sync/__init__.py
++++ b/synapse/handlers/sliding_sync/__init__.py
+@@ -271,6 +271,7 @@ class SlidingSyncHandler:
+ from_token=from_token,
+ to_token=to_token,
+ newly_joined=room_id in interested_rooms.newly_joined_rooms,
++ newly_left=room_id in interested_rooms.newly_left_rooms,
+ is_dm=room_id in interested_rooms.dm_room_ids,
+ )
+
+@@ -542,6 +543,7 @@ class SlidingSyncHandler:
+ from_token: Optional[SlidingSyncStreamToken],
+ to_token: StreamToken,
+ newly_joined: bool,
++ newly_left: bool,
+ is_dm: bool,
+ ) -> SlidingSyncResult.RoomResult:
+ """
+@@ -559,6 +561,7 @@ class SlidingSyncHandler:
+ from_token: The point in the stream to sync from.
+ to_token: The point in the stream to sync up to.
+ newly_joined: If the user has newly joined the room
++ newly_left: If the user has newly left the room
+ is_dm: Whether the room is a DM room
+ """
+ user = sync_config.user
+@@ -856,6 +859,26 @@ class SlidingSyncHandler:
+ # TODO: Limit the number of state events we're about to send down
+ # the room, if its too many we should change this to an
+ # `initial=True`?
++
++ # For the case of rejecting remote invites, the leave event won't be
++ # returned by `get_current_state_deltas_for_room`. This is due to the current
++ # state only being filled out for rooms the server is in, and so doesn't pick
++ # up out-of-band leaves (including locally rejected invites) as these events
++ # are outliers and not added to the `current_state_delta_stream`.
++ #
++ # We rely on being explicitly told that the room has been `newly_left` to
++ # ensure we extract the out-of-band leave.
++ if newly_left and room_membership_for_user_at_to_token.event_id is not None:
++ membership_changed = True
++ leave_event = await self.store.get_event(
++ room_membership_for_user_at_to_token.event_id
++ )
++ state_key = leave_event.get_state_key()
++ if state_key is not None:
++ room_state_delta_id_map[(leave_event.type, state_key)] = (
++ room_membership_for_user_at_to_token.event_id
++ )
++
+ deltas = await self.get_current_state_deltas_for_room(
+ room_id=room_id,
+ room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py
+index 7e3cf539df..6d1ac91605 100644
+--- a/synapse/handlers/sliding_sync/room_lists.py
++++ b/synapse/handlers/sliding_sync/room_lists.py
+@@ -1120,7 +1120,7 @@ class SlidingSyncRoomLists:
+ (
+ newly_joined_room_ids,
+ newly_left_room_map,
+- ) = await self._get_newly_joined_and_left_rooms(
++ ) = await self._get_newly_joined_and_left_rooms_fallback(
+ user_id, to_token=to_token, from_token=from_token
+ )
+
+@@ -1176,6 +1176,53 @@ class SlidingSyncRoomLists:
+ "state reset" out of the room, and so that room would not be part of the
+ "current memberships" of the user.
+
++ Returns:
++ A 2-tuple of newly joined room IDs and a map of newly_left room
++ IDs to the `RoomsForUserStateReset` entry.
++
++ We're using `RoomsForUserStateReset` but that doesn't necessarily mean the
++ user was state reset out of the rooms. It's just that the `event_id`/`sender`
++ are optional and we can't tell the difference between the server leaving the
++ room when the user was the last person participating in the room and left or
++ was state reset out of the room. To actually check for a state reset, you
++ need to check if a membership still exists in the room.
++ """
++
++ newly_joined_room_ids: Set[str] = set()
++ newly_left_room_map: Dict[str, RoomsForUserStateReset] = {}
++
++ if not from_token:
++ return newly_joined_room_ids, newly_left_room_map
++
++ changes = await self.store.get_sliding_sync_membership_changes(
++ user_id,
++ from_key=from_token.room_key,
++ to_key=to_token.room_key,
++ excluded_room_ids=set(self.rooms_to_exclude_globally),
++ )
++
++ for room_id, entry in changes.items():
++ if entry.membership == Membership.JOIN:
++ newly_joined_room_ids.add(room_id)
++ elif entry.membership == Membership.LEAVE:
++ newly_left_room_map[room_id] = entry
++
++ return newly_joined_room_ids, newly_left_room_map
++
++ @trace
++ async def _get_newly_joined_and_left_rooms_fallback(
++ self,
++ user_id: str,
++ to_token: StreamToken,
++ from_token: Optional[StreamToken],
++ ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]:
++ """Fetch the sets of rooms that the user newly joined or left in the
++ given token range.
++
++ Note: there may be rooms in the newly left rooms where the user was
++ "state reset" out of the room, and so that room would not be part of the
++ "current memberships" of the user.
++
+ Returns:
+ A 2-tuple of newly joined room IDs and a map of newly_left room
+ IDs to the `RoomsForUserStateReset` entry.
+diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
+index c52389b8a9..3fda49f31f 100644
+--- a/synapse/storage/databases/main/stream.py
++++ b/synapse/storage/databases/main/stream.py
+@@ -80,6 +80,7 @@ from synapse.storage.database import (
+ )
+ from synapse.storage.databases.main.events_worker import EventsWorkerStore
+ from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
++from synapse.storage.roommember import RoomsForUserStateReset
+ from synapse.storage.util.id_generators import MultiWriterIdGenerator
+ from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection
+ from synapse.util.caches.descriptors import cached, cachedList
+@@ -993,6 +994,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
+ available in the `current_state_delta_stream` table. To actually check for a
+ state reset, you need to check if a membership still exists in the room.
+ """
++
++ assert from_key.topological is None
++ assert to_key.topological is None
++
+ # Start by ruling out cases where a DB query is not necessary.
+ if from_key == to_key:
+ return []
+@@ -1138,6 +1143,203 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
+ if membership_change.room_id not in room_ids_to_exclude
+ ]
+
++ @trace
++ async def get_sliding_sync_membership_changes(
++ self,
++ user_id: str,
++ from_key: RoomStreamToken,
++ to_key: RoomStreamToken,
++ excluded_room_ids: Optional[AbstractSet[str]] = None,
++ ) -> Dict[str, RoomsForUserStateReset]:
++ """
++ Fetch membership events that result in a meaningful membership change for a
++ given user.
++
++ A meaningful membership change is one where the `membership` value actually
++ changes. This means membership changes from `join` to `join` (like a display
++ name change) will be filtered out since they result in no meaningful change.
++
++ Note: This function only works with "live" tokens with `stream_ordering` only.
++
++ We're looking for membership changes in the token range (> `from_key` and <=
++ `to_key`).
++
++ Args:
++ user_id: The user ID to fetch membership events for.
++ from_key: The point in the stream to sync from (fetching events > this point).
++ to_key: The token to fetch rooms up to (fetching events <= this point).
++ excluded_room_ids: Optional list of room IDs to exclude from the results.
++
++ Returns:
++ All meaningful membership changes to the current state in the token range.
++ Events are sorted by `stream_ordering` ascending.
++
++ `event_id`/`sender` can be `None` when the server leaves a room (meaning
++ everyone locally left) or a state reset which removed the person from the
++ room. We can't tell the difference between the two cases with what's
++ available in the `current_state_delta_stream` table. To actually check for a
++ state reset, you need to check if a membership still exists in the room.
++ """
++
++ assert from_key.topological is None
++ assert to_key.topological is None
++
++ # Start by ruling out cases where a DB query is not necessary.
++ if from_key == to_key:
++ return {}
++
++ if from_key:
++ has_changed = self._membership_stream_cache.has_entity_changed(
++ user_id, int(from_key.stream)
++ )
++ if not has_changed:
++ return {}
++
++ room_ids_to_exclude: AbstractSet[str] = set()
++ if excluded_room_ids is not None:
++ room_ids_to_exclude = excluded_room_ids
++
++ def f(txn: LoggingTransaction) -> Dict[str, RoomsForUserStateReset]:
++ # To handle tokens with a non-empty instance_map we fetch more
++ # results than necessary and then filter down
++ min_from_id = from_key.stream
++ max_to_id = to_key.get_max_stream_pos()
++
++ # This query looks at membership changes in
++ # `sliding_sync_membership_snapshots` which will not include users
++ # that were state reset out of rooms; so we need to look for that
++ # case in `current_state_delta_stream`.
++ sql = """
++ SELECT
++ room_id,
++ membership_event_id,
++ event_instance_name,
++ event_stream_ordering,
++ membership,
++ sender,
++ prev_membership,
++ room_version
++ FROM
++ (
++ SELECT
++ s.room_id,
++ s.membership_event_id,
++ s.event_instance_name,
++ s.event_stream_ordering,
++ s.membership,
++ s.sender,
++ m_prev.membership AS prev_membership
++ FROM sliding_sync_membership_snapshots as s
++ LEFT JOIN event_edges AS e ON e.event_id = s.membership_event_id
++ LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = e.prev_event_id
++ WHERE s.user_id = ?
++
++ UNION ALL
++
++ SELECT
++ s.room_id,
++ e.event_id,
++ s.instance_name,
++ s.stream_id,
++ m.membership,
++ e.sender,
++ m_prev.membership AS prev_membership
++ FROM current_state_delta_stream AS s
++ LEFT JOIN events AS e ON e.event_id = s.event_id
++ LEFT JOIN room_memberships AS m ON m.event_id = s.event_id
++ LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = s.prev_event_id
++ WHERE
++ s.type = ?
++ AND s.state_key = ?
++ ) AS c
++ INNER JOIN rooms USING (room_id)
++ WHERE event_stream_ordering > ? AND event_stream_ordering <= ?
++ ORDER BY event_stream_ordering ASC
++ """
++
++ txn.execute(
++ sql,
++ (user_id, EventTypes.Member, user_id, min_from_id, max_to_id),
++ )
++
++ membership_changes: Dict[str, RoomsForUserStateReset] = {}
++ for (
++ room_id,
++ membership_event_id,
++ event_instance_name,
++ event_stream_ordering,
++ membership,
++ sender,
++ prev_membership,
++ room_version_id,
++ ) in txn:
++ assert room_id is not None
++ assert event_stream_ordering is not None
++
++ if room_id in room_ids_to_exclude:
++ continue
++
++ if _filter_results_by_stream(
++ from_key,
++ to_key,
++ event_instance_name,
++ event_stream_ordering,
++ ):
++ # When the server leaves a room, it will insert new rows into the
++ # `current_state_delta_stream` table with `event_id = null` for all
++ # current state. This means we might already have a row for the
++ # leave event and then another for the same leave where the
++ # `event_id=null` but the `prev_event_id` is pointing back at the
++ # earlier leave event. We don't want to report the leave, if we
++ # already have a leave event.
++ if (
++ membership_event_id is None
++ and prev_membership == Membership.LEAVE
++ ):
++ continue
++
++ if membership_event_id is None and room_id in membership_changes:
++ # SUSPICIOUS: if we join a room and get state reset out of it
++ # in the same queried window,
++ # won't this ignore the 'state reset out of it' part?
++ continue
++
++ # When `s.event_id = null`, we won't be able to get respective
++ # `room_membership` but can assume the user has left the room
++ # because this only happens when the server leaves a room
++ # (meaning everyone locally left) or a state reset which removed
++ # the person from the room.
++ membership = (
++ membership if membership is not None else Membership.LEAVE
++ )
++
++ if membership == prev_membership:
++ # If `membership` and `prev_membership` are the same then this
++ # is not a meaningful change so we can skip it.
++ # An example of this happening is when the user changes their display name.
++ continue
++
++ membership_change = RoomsForUserStateReset(
++ room_id=room_id,
++ sender=sender,
++ membership=membership,
++ event_id=membership_event_id,
++ event_pos=PersistedEventPosition(
++ event_instance_name, event_stream_ordering
++ ),
++ room_version_id=room_version_id,
++ )
++
++ membership_changes[room_id] = membership_change
++
++ return membership_changes
++
++ membership_changes = await self.db_pool.runInteraction(
++ "get_sliding_sync_membership_changes", f
++ )
++
++ return membership_changes
++
+ @cancellable
+ async def get_membership_changes_for_user(
+ self,
+diff --git a/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql b/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql
+new file mode 100644
+index 0000000000..c694203f95
+--- /dev/null
++++ b/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql
+@@ -0,0 +1,16 @@
++--
++-- This file is licensed under the Affero General Public License (AGPL) version 3.
++--
++-- Copyright (C) 2025 New Vector, Ltd
++--
++-- This program is free software: you can redistribute it and/or modify
++-- it under the terms of the GNU Affero General Public License as
++-- published by the Free Software Foundation, either version 3 of the
++-- License, or (at your option) any later version.
++--
++-- See the GNU Affero General Public License for more details:
++-- <https://www.gnu.org/licenses/agpl-3.0.html>.
++
++-- So we can fetch all rooms for a given user sorted by stream order
++DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id;
++CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id, event_stream_ordering);
+diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
+index cbacf21ae7..7144c58217 100644
+--- a/tests/handlers/test_sliding_sync.py
++++ b/tests/handlers/test_sliding_sync.py
+@@ -594,6 +594,12 @@ class ComputeInterestedRoomsTestCase(SlidingSyncBase):
+ the correct list of rooms IDs.
+ """
+
++ # FIXME: We should refactor these tests to run against `compute_interested_rooms(...)`
++ # instead of just `get_room_membership_for_user_at_to_token(...)` which is only used
++ # in the fallback path (`_compute_interested_rooms_fallback(...)`). These scenarios do
++ # well to stress that logic and we shouldn't remove them just because we're removing
++ # the fallback path (tracked by https://github.com/element-hq/synapse/issues/17623).
++
+ servlets = [
+ admin.register_servlets,
+ knock.register_servlets,
+@@ -2976,6 +2982,12 @@ class ComputeInterestedRoomsShardTestCase(
+ sharded event stream_writers enabled
+ """
+
++ # FIXME: We should refactor these tests to run against `compute_interested_rooms(...)`
++ # instead of just `get_room_membership_for_user_at_to_token(...)` which is only used
++ # in the fallback path (`_compute_interested_rooms_fallback(...)`). These scenarios do
++ # well to stress that logic and we shouldn't remove them just because we're removing
++ # the fallback path (tracked by https://github.com/element-hq/synapse/issues/17623).
++
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
+index f3cf2111ec..dcec5b4cf0 100644
+--- a/tests/rest/client/sliding_sync/test_sliding_sync.py
++++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
+@@ -790,6 +790,64 @@ class SlidingSyncTestCase(SlidingSyncBase):
+ exact=True,
+ )
+
++ def test_reject_remote_invite(self) -> None:
++ """Test that rejecting a remote invite comes down incremental sync"""
++
++ user_id = self.register_user("user1", "pass")
++ user_tok = self.login(user_id, "pass")
++
++ # Create a remote room invite (out-of-band membership)
++ room_id = "!room:remote.server"
++ self._create_remote_invite_room_for_user(user_id, None, room_id)
++
++ # Make the Sliding Sync request
++ sync_body = {
++ "lists": {
++ "foo-list": {
++ "ranges": [[0, 1]],
++ "required_state": [(EventTypes.Member, StateValues.ME)],
++ "timeline_limit": 3,
++ }
++ }
++ }
++ response_body, from_token = self.do_sync(sync_body, tok=user_tok)
++ # We should see the room (like normal)
++ self.assertIncludes(
++ set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
++ {room_id},
++ exact=True,
++ )
++
++ # Reject the remote room invite
++ self.helper.leave(room_id, user_id, tok=user_tok)
++
++ # Sync again after rejecting the invite
++ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user_tok)
++
++ # The fix that adds the leave event to incremental sync when rejecting a
++ # remote invite relies on the new tables being in use.
++ if self.use_new_tables:
++ # We should see the newly_left room
++ self.assertIncludes(
++ set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
++ {room_id},
++ exact=True,
++ )
++ # We should see the leave state for the room so clients don't end up with stuck
++ # invites
++ self.assertIncludes(
++ {
++ (
++ state["type"],
++ state["state_key"],
++ state["content"].get("membership"),
++ )
++ for state in response_body["rooms"][room_id]["required_state"]
++ },
++ {(EventTypes.Member, user_id, Membership.LEAVE)},
++ exact=True,
++ )
++
+ def test_ignored_user_invites_initial_sync(self) -> None:
+ """
+ Make sure we ignore invites if they are from one of the `m.ignored_user_list` on
+--
+2.49.0
+
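The heart of patch 0036 is the new `get_sliding_sync_membership_changes()` query, which unions `sliding_sync_membership_snapshots` with `current_state_delta_stream` and then keeps only "meaningful" membership transitions. A standalone, illustrative-only restatement of that row filter (the function name is made up, and the per-room dedup the patch itself flags as SUSPICIOUS is omitted):

    # Illustrative sketch, not Synapse code: a row with no membership event
    # (server leave or state reset) is treated as a leave, and rows whose
    # membership equals the previous membership (e.g. join -> join displayname
    # updates) are dropped as not meaningful.
    from typing import Optional

    LEAVE = "leave"

    def is_meaningful_change(
        membership: Optional[str], prev_membership: Optional[str]
    ) -> bool:
        effective = membership if membership is not None else LEAVE
        return effective != prev_membership

    assert is_meaningful_change("join", None)        # fresh join is reported
    assert not is_meaningful_change("join", "join")  # displayname-only update, skipped
    assert is_meaningful_change(None, "join")        # state reset out of the room
    assert not is_meaningful_change(None, LEAVE)     # duplicate leave row, skipped

The schema delta in the same patch re-creates `sliding_sync_membership_snapshots_user_id` as a composite `(user_id, event_stream_ordering)` index so the snapshot half of that union (`WHERE s.user_id = ? ... ORDER BY event_stream_ordering ASC`) can be served directly from the index.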
diff --git a/packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch b/packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch
new file mode 100644
index 0000000..38b7a9c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0037-Bump-sha2-from-0.10.8-to-0.10.9-18395.patch
@@ -0,0 +1,28 @@
+From b5d94f654c32b0cd09ba727baddba93b0bf4f63f Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Fri, 9 May 2025 15:35:18 +0100
+Subject: [PATCH 37/74] Bump sha2 from 0.10.8 to 0.10.9 (#18395)
+
+---
+ Cargo.lock | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Cargo.lock b/Cargo.lock
+index 822eb2cdba..27a2e26be5 100644
+--- a/Cargo.lock
++++ b/Cargo.lock
+@@ -480,9 +480,9 @@ dependencies = [
+
+ [[package]]
+ name = "sha2"
+-version = "0.10.8"
++version = "0.10.9"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+-checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
++checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
+ dependencies = [
+ "cfg-if",
+ "cpufeatures",
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch b/packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch
new file mode 100644
index 0000000..f6d13c0
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0038-Bump-txredisapi-from-1.4.10-to-1.4.11-18392.patch
@@ -0,0 +1,35 @@
+From c6dfe70014c7f577a7fa749bfb8953bd08bc69d7 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Fri, 9 May 2025 15:36:41 +0100
+Subject: [PATCH 38/74] Bump txredisapi from 1.4.10 to 1.4.11 (#18392)
+
+---
+ poetry.lock | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index abd97a785b..69d76936b0 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -2886,15 +2886,15 @@ windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)"
+
+ [[package]]
+ name = "txredisapi"
+-version = "1.4.10"
++version = "1.4.11"
+ description = "non-blocking redis client for python"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+ markers = "extra == \"all\" or extra == \"redis\""
+ files = [
+- {file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"},
+- {file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"},
++ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
++ {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
+ ]
+
+ [package.dependencies]
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch b/packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch
new file mode 100644
index 0000000..be5695e
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0039-Bump-packaging-from-24.2-to-25.0-18393.patch
@@ -0,0 +1,34 @@
+From b7728a2df10de6cd09f5313ebca8a95e226c15fc Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Fri, 9 May 2025 15:37:05 +0100
+Subject: [PATCH 39/74] Bump packaging from 24.2 to 25.0 (#18393)
+
+---
+ poetry.lock | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 69d76936b0..8ff3a377f4 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -1561,14 +1561,14 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
+
+ [[package]]
+ name = "packaging"
+-version = "24.2"
++version = "25.0"
+ description = "Core utilities for Python packages"
+ optional = false
+ python-versions = ">=3.8"
+ groups = ["main", "dev"]
+ files = [
+- {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
+- {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
++ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
++ {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
+ ]
+
+ [[package]]
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch b/packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch
new file mode 100644
index 0000000..7693b29
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0040-Bump-pydantic-from-2.10.3-to-2.11.4-18394.patch
@@ -0,0 +1,279 @@
+From 1920dfff40ad1078071e099a2afbfa31a5409e6b Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Fri, 9 May 2025 16:36:54 +0100
+Subject: [PATCH 40/74] Bump pydantic from 2.10.3 to 2.11.4 (#18394)
+
+---
+ poetry.lock | 229 ++++++++++++++++++++++++++++------------------------
+ 1 file changed, 122 insertions(+), 107 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 8ff3a377f4..e06e08b7a7 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -1795,20 +1795,21 @@ files = [
+
+ [[package]]
+ name = "pydantic"
+-version = "2.10.3"
++version = "2.11.4"
+ description = "Data validation using Python type hints"
+ optional = false
+-python-versions = ">=3.8"
++python-versions = ">=3.9"
+ groups = ["main", "dev"]
+ files = [
+- {file = "pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d"},
+- {file = "pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9"},
++ {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
++ {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
+ ]
+
+ [package.dependencies]
+ annotated-types = ">=0.6.0"
+-pydantic-core = "2.27.1"
++pydantic-core = "2.33.2"
+ typing-extensions = ">=4.12.2"
++typing-inspection = ">=0.4.0"
+
+ [package.extras]
+ email = ["email-validator (>=2.0.0)"]
+@@ -1816,112 +1817,111 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows
+
+ [[package]]
+ name = "pydantic-core"
+-version = "2.27.1"
++version = "2.33.2"
+ description = "Core functionality for Pydantic validation and serialization"
+ optional = false
+-python-versions = ">=3.8"
++python-versions = ">=3.9"
+ groups = ["main", "dev"]
+ files = [
+- {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
+- {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
+- {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
+- {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
+- {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
+- {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
+- {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
+- {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
+- {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
+- {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
+- {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
+- {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
+- {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
+- {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
+- {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
+- {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
+- {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
+- {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
+- {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
+- {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
+- {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
+- {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
+- {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
+- {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
+- {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
++ {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
++ {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
++ {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
++ {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
++ {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
++ {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
++ {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
++ {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
++ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
++ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
++ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
++ {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
+ ]
+
+ [package.dependencies]
+@@ -3085,6 +3085,21 @@ files = [
+ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
+ ]
+
++[[package]]
++name = "typing-inspection"
++version = "0.4.0"
++description = "Runtime typing introspection tools"
++optional = false
++python-versions = ">=3.9"
++groups = ["main", "dev"]
++files = [
++ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
++ {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
++]
++
++[package.dependencies]
++typing-extensions = ">=4.12.0"
++
+ [[package]]
+ name = "unpaddedbase64"
+ version = "2.1.0"
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch b/packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch
new file mode 100644
index 0000000..7d9416f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0041-Bump-actions-setup-go-from-5.4.0-to-5.5.0-18426.patch
@@ -0,0 +1,54 @@
+From 3dade08e7cef99a83e3410365a14a21a2b24d545 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 13 May 2025 09:34:23 +0100
+Subject: [PATCH 41/74] Bump actions/setup-go from 5.4.0 to 5.5.0 (#18426)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ .github/workflows/latest_deps.yml | 2 +-
+ .github/workflows/tests.yml | 2 +-
+ .github/workflows/twisted_trunk.yml | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
+index e7378ec0d3..366bb4cddb 100644
+--- a/.github/workflows/latest_deps.yml
++++ b/.github/workflows/latest_deps.yml
+@@ -200,7 +200,7 @@ jobs:
+ - name: Prepare Complement's Prerequisites
+ run: synapse/.ci/scripts/setup_complement_prerequisites.sh
+
+- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
++ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+ with:
+ cache-dependency-path: complement/go.sum
+ go-version-file: complement/go.mod
+diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
+index bb2e80a908..a7e35a0ece 100644
+--- a/.github/workflows/tests.yml
++++ b/.github/workflows/tests.yml
+@@ -669,7 +669,7 @@ jobs:
+ - name: Prepare Complement's Prerequisites
+ run: synapse/.ci/scripts/setup_complement_prerequisites.sh
+
+- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
++ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+ with:
+ cache-dependency-path: complement/go.sum
+ go-version-file: complement/go.mod
+diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
+index 0176f17401..5638029b39 100644
+--- a/.github/workflows/twisted_trunk.yml
++++ b/.github/workflows/twisted_trunk.yml
+@@ -173,7 +173,7 @@ jobs:
+ - name: Prepare Complement's Prerequisites
+ run: synapse/.ci/scripts/setup_complement_prerequisites.sh
+
+- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
++ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+ with:
+ cache-dependency-path: complement/go.sum
+ go-version-file: complement/go.mod
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch b/packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch
new file mode 100644
index 0000000..69e27c6
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0042-Bump-pillow-from-11.1.0-to-11.2.1-18429.patch
@@ -0,0 +1,191 @@
+From 40ce11ded0aa32158aee4d6526b8dd40c1c63a6a Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 13 May 2025 09:46:03 +0100
+Subject: [PATCH 42/74] Bump pillow from 11.1.0 to 11.2.1 (#18429)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ poetry.lock | 157 ++++++++++++++++++++++++++++------------------------
+ 1 file changed, 84 insertions(+), 73 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index e06e08b7a7..1935df638a 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -1600,89 +1600,100 @@ files = [
+
+ [[package]]
+ name = "pillow"
+-version = "11.1.0"
++version = "11.2.1"
+ description = "Python Imaging Library (Fork)"
+ optional = false
+ python-versions = ">=3.9"
+ groups = ["main"]
+ files = [
+- {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"},
+- {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"},
+- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"},
+- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"},
+- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"},
+- {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"},
+- {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"},
+- {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"},
+- {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"},
+- {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"},
+- {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"},
+- {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"},
+- {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"},
+- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"},
+- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"},
+- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"},
+- {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"},
+- {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"},
+- {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"},
+- {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"},
+- {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"},
+- {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"},
+- {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"},
+- {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"},
+- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"},
+- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"},
+- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"},
+- {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"},
+- {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"},
+- {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"},
+- {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"},
+- {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"},
+- {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"},
+- {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"},
+- {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"},
+- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"},
+- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"},
+- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"},
+- {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"},
+- {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"},
+- {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"},
+- {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"},
+- {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"},
+- {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"},
+- {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"},
+- {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"},
+- {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"},
+- {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"},
+- {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"},
+- {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"},
+- {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"},
+- {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"},
+- {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"},
+- {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"},
+- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"},
+- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"},
+- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"},
+- {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"},
+- {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"},
+- {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"},
+- {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"},
+- {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"},
+- {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"},
+- {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"},
+- {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"},
++ {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"},
++ {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"},
++ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61"},
++ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1"},
++ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c"},
++ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d"},
++ {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97"},
++ {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579"},
++ {file = "pillow-11.2.1-cp310-cp310-win32.whl", hash = "sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d"},
++ {file = "pillow-11.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad"},
++ {file = "pillow-11.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2"},
++ {file = "pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70"},
++ {file = "pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf"},
++ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7"},
++ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8"},
++ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600"},
++ {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788"},
++ {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e"},
++ {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e"},
++ {file = "pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6"},
++ {file = "pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193"},
++ {file = "pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7"},
++ {file = "pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f"},
++ {file = "pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b"},
++ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d"},
++ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4"},
++ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d"},
++ {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4"},
++ {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443"},
++ {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c"},
++ {file = "pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3"},
++ {file = "pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941"},
++ {file = "pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb"},
++ {file = "pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28"},
++ {file = "pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830"},
++ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0"},
++ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1"},
++ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f"},
++ {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155"},
++ {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14"},
++ {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b"},
++ {file = "pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2"},
++ {file = "pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691"},
++ {file = "pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c"},
++ {file = "pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22"},
++ {file = "pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7"},
++ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16"},
++ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b"},
++ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406"},
++ {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91"},
++ {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751"},
++ {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9"},
++ {file = "pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd"},
++ {file = "pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e"},
++ {file = "pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681"},
++ {file = "pillow-11.2.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8"},
++ {file = "pillow-11.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909"},
++ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928"},
++ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79"},
++ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35"},
++ {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb"},
++ {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a"},
++ {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36"},
++ {file = "pillow-11.2.1-cp39-cp39-win32.whl", hash = "sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67"},
++ {file = "pillow-11.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1"},
++ {file = "pillow-11.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193"},
++ {file = "pillow-11.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f"},
++ {file = "pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044"},
++ {file = "pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6"},
+ ]
+
+ [package.extras]
+-docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
++docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
+ fpx = ["olefile"]
+ mic = ["olefile"]
++test-arrow = ["pyarrow"]
+ tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"]
+ typing = ["typing-extensions ; python_version < \"3.10\""]
+ xmp = ["defusedxml"]
+--
+2.49.0
+
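The lock update above pins Pillow 11.2.1 and picks up its new `test-arrow` extra. Synapse uses Pillow for media thumbnailing, so a quick import-and-thumbnail check is a reasonable smoke test of the new wheel. The sketch below is illustrative only and not part of this repo:

    # Minimal smoke test (assumption: Pillow installed from the lock above).
    # Verifies the wheel imports and can downscale an image in memory,
    # roughly what Synapse's media thumbnailer asks of it.
    from io import BytesIO

    import PIL
    from PIL import Image

    src = Image.new("RGB", (640, 480), color=(200, 30, 30))
    buf = BytesIO()
    src.save(buf, format="PNG")
    buf.seek(0)

    img = Image.open(buf)
    img.thumbnail((96, 96))  # preserves aspect ratio
    assert img.size == (96, 72), img.size
    print("Pillow", PIL.__version__, "thumbnails OK")
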
diff --git a/packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch b/packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch
new file mode 100644
index 0000000..6d1ad3f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0043-1.130.0rc1.patch
@@ -0,0 +1,351 @@
+From 09b4109c2e3740d754f9256540bd96444fb82453 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erik@matrix.org>
+Date: Tue, 13 May 2025 10:44:11 +0100
+Subject: [PATCH 43/74] 1.130.0rc1
+
+---
+ CHANGES.md | 63 +++++++++++++++++++++++++++++++++++++++
+ changelog.d/17578.misc | 1 -
+ changelog.d/18181.misc | 1 -
+ changelog.d/18214.feature | 1 -
+ changelog.d/18218.doc | 1 -
+ changelog.d/18237.doc | 1 -
+ changelog.d/18291.docker | 1 -
+ changelog.d/18292.docker | 1 -
+ changelog.d/18293.docker | 1 -
+ changelog.d/18295.docker | 1 -
+ changelog.d/18297.misc | 1 -
+ changelog.d/18300.feature | 1 -
+ changelog.d/18313.misc | 1 -
+ changelog.d/18320.doc | 1 -
+ changelog.d/18330.misc | 1 -
+ changelog.d/18355.feature | 1 -
+ changelog.d/18360.misc | 1 -
+ changelog.d/18363.bugfix | 1 -
+ changelog.d/18367.misc | 1 -
+ changelog.d/18369.misc | 1 -
+ changelog.d/18374.misc | 1 -
+ changelog.d/18375.bugfix | 1 -
+ changelog.d/18377.doc | 1 -
+ changelog.d/18384.doc | 1 -
+ changelog.d/18385.misc | 1 -
+ changelog.d/18390.misc | 1 -
+ changelog.d/18399.misc | 1 -
+ debian/changelog | 6 ++++
+ pyproject.toml | 2 +-
+ 29 files changed, 70 insertions(+), 27 deletions(-)
+ delete mode 100644 changelog.d/17578.misc
+ delete mode 100644 changelog.d/18181.misc
+ delete mode 100644 changelog.d/18214.feature
+ delete mode 100644 changelog.d/18218.doc
+ delete mode 100644 changelog.d/18237.doc
+ delete mode 100644 changelog.d/18291.docker
+ delete mode 100644 changelog.d/18292.docker
+ delete mode 100644 changelog.d/18293.docker
+ delete mode 100644 changelog.d/18295.docker
+ delete mode 100644 changelog.d/18297.misc
+ delete mode 100644 changelog.d/18300.feature
+ delete mode 100644 changelog.d/18313.misc
+ delete mode 100644 changelog.d/18320.doc
+ delete mode 100644 changelog.d/18330.misc
+ delete mode 100644 changelog.d/18355.feature
+ delete mode 100644 changelog.d/18360.misc
+ delete mode 100644 changelog.d/18363.bugfix
+ delete mode 100644 changelog.d/18367.misc
+ delete mode 100644 changelog.d/18369.misc
+ delete mode 100644 changelog.d/18374.misc
+ delete mode 100644 changelog.d/18375.bugfix
+ delete mode 100644 changelog.d/18377.doc
+ delete mode 100644 changelog.d/18384.doc
+ delete mode 100644 changelog.d/18385.misc
+ delete mode 100644 changelog.d/18390.misc
+ delete mode 100644 changelog.d/18399.misc
+
+diff --git a/CHANGES.md b/CHANGES.md
+index f04c7ef026..235d65c746 100644
+--- a/CHANGES.md
++++ b/CHANGES.md
+@@ -1,3 +1,66 @@
++# Synapse 1.130.0rc1 (2025-05-13)
++
++### Features
++
++- Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. ([\#18214](https://github.com/element-hq/synapse/issues/18214))
++- Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results. ([\#18300](https://github.com/element-hq/synapse/issues/18300))
++- Add support for handling `GET /devices/` on workers. ([\#18355](https://github.com/element-hq/synapse/issues/18355))
++
++### Bugfixes
++
++- Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363))
++- Pass leave from remote invite rejection down Sliding Sync. ([\#18375](https://github.com/element-hq/synapse/issues/18375))
++
++### Updates to the Docker image
++
++- In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291))
++- Optimize the build of the workers image. ([\#18292](https://github.com/element-hq/synapse/issues/18292))
++- In start_for_complement.sh, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293))
++- When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. ([\#18295](https://github.com/element-hq/synapse/issues/18295))
++
++### Improved Documentation
++
++- Improve formatting of the README file. ([\#18218](https://github.com/element-hq/synapse/issues/18218))
++- Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider. ([\#18237](https://github.com/element-hq/synapse/issues/18237))
++- Fix typo in docs about the `push` config option. Contributed by @HarHarLinks. ([\#18320](https://github.com/element-hq/synapse/issues/18320))
++- Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers. ([\#18377](https://github.com/element-hq/synapse/issues/18377))
++- Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. ([\#18384](https://github.com/element-hq/synapse/issues/18384))
++
++### Internal Changes
++
++- Return specific error code when adding an email address / phone number to account is not supported (MSC4178). ([\#17578](https://github.com/element-hq/synapse/issues/17578))
++- Stop auto-provisioning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. ([\#18181](https://github.com/element-hq/synapse/issues/18181))
++- Apply file hashing and existing quarantines to media downloaded for URL previews. ([\#18297](https://github.com/element-hq/synapse/issues/18297))
++- Allow a few admin APIs used by matrix-authentication-service to run on workers. ([\#18313](https://github.com/element-hq/synapse/issues/18313))
++- Apply `should_drop_federated_event` to federation invites. ([\#18330](https://github.com/element-hq/synapse/issues/18330))
++- Allow `/rooms/` admin API to be run on workers. ([\#18360](https://github.com/element-hq/synapse/issues/18360))
++- Minor performance improvements to the notifier. ([\#18367](https://github.com/element-hq/synapse/issues/18367))
++- Slight performance increase when using the ratelimiter. ([\#18369](https://github.com/element-hq/synapse/issues/18369))
++- Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token. ([\#18374](https://github.com/element-hq/synapse/issues/18374), [\#18385](https://github.com/element-hq/synapse/issues/18385))
++- Fixed test failures when using authlib 1.5.2. ([\#18390](https://github.com/element-hq/synapse/issues/18390))
++- Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both new and fallback logic paths. ([\#18399](https://github.com/element-hq/synapse/issues/18399))
++
++
++
++### Updates to locked dependencies
++
++* Bump actions/add-to-project from 280af8ae1f83a494cfad2cb10f02f6d13529caa9 to 5b1a254a3546aef88e0a7724a77a623fa2e47c36. ([\#18365](https://github.com/element-hq/synapse/issues/18365))
++* Bump actions/download-artifact from 4.2.1 to 4.3.0. ([\#18364](https://github.com/element-hq/synapse/issues/18364))
++* Bump actions/setup-go from 5.4.0 to 5.5.0. ([\#18426](https://github.com/element-hq/synapse/issues/18426))
++* Bump anyhow from 1.0.97 to 1.0.98. ([\#18336](https://github.com/element-hq/synapse/issues/18336))
++* Bump packaging from 24.2 to 25.0. ([\#18393](https://github.com/element-hq/synapse/issues/18393))
++* Bump pillow from 11.1.0 to 11.2.1. ([\#18429](https://github.com/element-hq/synapse/issues/18429))
++* Bump pydantic from 2.10.3 to 2.11.4. ([\#18394](https://github.com/element-hq/synapse/issues/18394))
++* Bump pyo3-log from 0.12.2 to 0.12.3. ([\#18317](https://github.com/element-hq/synapse/issues/18317))
++* Bump pyopenssl from 24.3.0 to 25.0.0. ([\#18315](https://github.com/element-hq/synapse/issues/18315))
++* Bump sha2 from 0.10.8 to 0.10.9. ([\#18395](https://github.com/element-hq/synapse/issues/18395))
++* Bump sigstore/cosign-installer from 3.8.1 to 3.8.2. ([\#18366](https://github.com/element-hq/synapse/issues/18366))
++* Bump softprops/action-gh-release from 1 to 2. ([\#18264](https://github.com/element-hq/synapse/issues/18264))
++* Bump stefanzweifel/git-auto-commit-action from 5.1.0 to 5.2.0. ([\#18354](https://github.com/element-hq/synapse/issues/18354))
++* Bump txredisapi from 1.4.10 to 1.4.11. ([\#18392](https://github.com/element-hq/synapse/issues/18392))
++* Bump types-jsonschema from 4.23.0.20240813 to 4.23.0.20241208. ([\#18305](https://github.com/element-hq/synapse/issues/18305))
++* Bump types-psycopg2 from 2.9.21.20250121 to 2.9.21.20250318. ([\#18316](https://github.com/element-hq/synapse/issues/18316))
++
+ # Synapse 1.129.0 (2025-05-06)
+
+ No significant changes since 1.129.0rc2.
+diff --git a/changelog.d/17578.misc b/changelog.d/17578.misc
+deleted file mode 100644
+index 7bf69576cd..0000000000
+--- a/changelog.d/17578.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Return specific error code when adding an email address / phone number to account is not supported (MSC4178).
+diff --git a/changelog.d/18181.misc b/changelog.d/18181.misc
+deleted file mode 100644
+index d9ba2f1dd1..0000000000
+--- a/changelog.d/18181.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Stop auto-provisionning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later.
+diff --git a/changelog.d/18214.feature b/changelog.d/18214.feature
+deleted file mode 100644
+index 751cb7d383..0000000000
+--- a/changelog.d/18214.feature
++++ /dev/null
+@@ -1 +0,0 @@
+-Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks.
+\ No newline at end of file
+diff --git a/changelog.d/18218.doc b/changelog.d/18218.doc
+deleted file mode 100644
+index f62da6a0b9..0000000000
+--- a/changelog.d/18218.doc
++++ /dev/null
+@@ -1 +0,0 @@
+-Improve formatting of the README file.
+diff --git a/changelog.d/18237.doc b/changelog.d/18237.doc
+deleted file mode 100644
+index 872f7cab7d..0000000000
+--- a/changelog.d/18237.doc
++++ /dev/null
+@@ -1 +0,0 @@
+-Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider.
+\ No newline at end of file
+diff --git a/changelog.d/18291.docker b/changelog.d/18291.docker
+deleted file mode 100644
+index b94c0e80e3..0000000000
+--- a/changelog.d/18291.docker
++++ /dev/null
+@@ -1 +0,0 @@
+-In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`.
+diff --git a/changelog.d/18292.docker b/changelog.d/18292.docker
+deleted file mode 100644
+index cdb95b369b..0000000000
+--- a/changelog.d/18292.docker
++++ /dev/null
+@@ -1 +0,0 @@
+-Optimize the build of the workers image.
+diff --git a/changelog.d/18293.docker b/changelog.d/18293.docker
+deleted file mode 100644
+index df47a68bfe..0000000000
+--- a/changelog.d/18293.docker
++++ /dev/null
+@@ -1 +0,0 @@
+-In start_for_complement.sh, replace some external program calls with shell builtins.
+diff --git a/changelog.d/18295.docker b/changelog.d/18295.docker
+deleted file mode 100644
+index 239def1f54..0000000000
+--- a/changelog.d/18295.docker
++++ /dev/null
+@@ -1 +0,0 @@
+-When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly.
+diff --git a/changelog.d/18297.misc b/changelog.d/18297.misc
+deleted file mode 100644
+index 5032d48174..0000000000
+--- a/changelog.d/18297.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Apply file hashing and existing quarantines to media downloaded for URL previews.
+diff --git a/changelog.d/18300.feature b/changelog.d/18300.feature
+deleted file mode 100644
+index 92bea77556..0000000000
+--- a/changelog.d/18300.feature
++++ /dev/null
+@@ -1 +0,0 @@
+-Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results.
+\ No newline at end of file
+diff --git a/changelog.d/18313.misc b/changelog.d/18313.misc
+deleted file mode 100644
+index febf3ac06e..0000000000
+--- a/changelog.d/18313.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Allow a few admin APIs used by matrix-authentication-service to run on workers.
+diff --git a/changelog.d/18320.doc b/changelog.d/18320.doc
+deleted file mode 100644
+index d84c279940..0000000000
+--- a/changelog.d/18320.doc
++++ /dev/null
+@@ -1 +0,0 @@
+-Fix typo in docs about the `push` config option. Contributed by @HarHarLinks.
+diff --git a/changelog.d/18330.misc b/changelog.d/18330.misc
+deleted file mode 100644
+index dcf341fa34..0000000000
+--- a/changelog.d/18330.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Apply `should_drop_federated_event` to federation invites.
+diff --git a/changelog.d/18355.feature b/changelog.d/18355.feature
+deleted file mode 100644
+index 4813f0a291..0000000000
+--- a/changelog.d/18355.feature
++++ /dev/null
+@@ -1 +0,0 @@
+-Add support for handling `GET /devices/` on workers.
+diff --git a/changelog.d/18360.misc b/changelog.d/18360.misc
+deleted file mode 100644
+index e5bf4f536f..0000000000
+--- a/changelog.d/18360.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Allow `/rooms/` admin API to be run on workers.
+diff --git a/changelog.d/18363.bugfix b/changelog.d/18363.bugfix
+deleted file mode 100644
+index bfa336d52f..0000000000
+--- a/changelog.d/18363.bugfix
++++ /dev/null
+@@ -1 +0,0 @@
+-Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers.
+diff --git a/changelog.d/18367.misc b/changelog.d/18367.misc
+deleted file mode 100644
+index 2e8b897fa6..0000000000
+--- a/changelog.d/18367.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Minor performance improvements to the notifier.
+diff --git a/changelog.d/18369.misc b/changelog.d/18369.misc
+deleted file mode 100644
+index f4c0e5f006..0000000000
+--- a/changelog.d/18369.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Slight performance increase when using the ratelimiter.
+diff --git a/changelog.d/18374.misc b/changelog.d/18374.misc
+deleted file mode 100644
+index a8efca68d0..0000000000
+--- a/changelog.d/18374.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token.
+\ No newline at end of file
+diff --git a/changelog.d/18375.bugfix b/changelog.d/18375.bugfix
+deleted file mode 100644
+index faebe6f046..0000000000
+--- a/changelog.d/18375.bugfix
++++ /dev/null
+@@ -1 +0,0 @@
+-Pass leave from remote invite rejection down Sliding Sync.
+diff --git a/changelog.d/18377.doc b/changelog.d/18377.doc
+deleted file mode 100644
+index ceb2b64e5d..0000000000
+--- a/changelog.d/18377.doc
++++ /dev/null
+@@ -1 +0,0 @@
+-Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers.
+diff --git a/changelog.d/18384.doc b/changelog.d/18384.doc
+deleted file mode 100644
+index ebcd029639..0000000000
+--- a/changelog.d/18384.doc
++++ /dev/null
+@@ -1 +0,0 @@
+-Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks.
+diff --git a/changelog.d/18385.misc b/changelog.d/18385.misc
+deleted file mode 100644
+index a8efca68d0..0000000000
+--- a/changelog.d/18385.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token.
+\ No newline at end of file
+diff --git a/changelog.d/18390.misc b/changelog.d/18390.misc
+deleted file mode 100644
+index e9a08dcfbf..0000000000
+--- a/changelog.d/18390.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Fixed test failures when using authlib 1.5.2.
+diff --git a/changelog.d/18399.misc b/changelog.d/18399.misc
+deleted file mode 100644
+index 847dc9a2b1..0000000000
+--- a/changelog.d/18399.misc
++++ /dev/null
+@@ -1 +0,0 @@
+-Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both new and fallback logic paths.
+diff --git a/debian/changelog b/debian/changelog
+index 2c1cb20624..e3eb894851 100644
+--- a/debian/changelog
++++ b/debian/changelog
+@@ -1,3 +1,9 @@
++matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium
++
++ * New Synapse release 1.130.0rc1.
++
++ -- Synapse Packaging team <packages@matrix.org> Tue, 13 May 2025 10:44:04 +0100
++
+ matrix-synapse-py3 (1.129.0) stable; urgency=medium
+
+ * New Synapse release 1.129.0.
+diff --git a/pyproject.toml b/pyproject.toml
+index 24ae0db05c..5f80d28344 100644
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
+
+ [tool.poetry]
+ name = "matrix-synapse"
+-version = "1.129.0"
++version = "1.130.0rc1"
+ description = "Homeserver for the Matrix decentralised comms protocol"
+ authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
+ license = "AGPL-3.0-or-later"
+--
+2.49.0
+
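Among the 1.130.0rc1 features above is the `GET /_synapse/admin/v1/scheduled_tasks` Admin API. A hedged sketch of calling it follows; the homeserver URL and token are placeholders, and the `scheduled_tasks` response key is an assumption drawn from the endpoint name rather than verified against the Synapse docs:

    # Illustrative client call, not from this repo. HOMESERVER and
    # ADMIN_TOKEN are placeholders; "scheduled_tasks" as the response key
    # is an assumption based on the endpoint name.
    import requests

    HOMESERVER = "https://matrix.example.org"
    ADMIN_TOKEN = "CHANGE_ME"  # an access token for a server admin account

    resp = requests.get(
        f"{HOMESERVER}/_synapse/admin/v1/scheduled_tasks",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        timeout=10,
    )
    resp.raise_for_status()
    for task in resp.json().get("scheduled_tasks", []):
        print(task)
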
diff --git a/packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch b/packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch
new file mode 100644
index 0000000..dc863db
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0044-Fix-up-changelog.patch
@@ -0,0 +1,43 @@
+From 99c15f4630a7c9983c1b134505eaab703c138ea9 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erik@matrix.org>
+Date: Tue, 13 May 2025 10:54:23 +0100
+Subject: [PATCH 44/74] Fix up changelog
+
+---
+ CHANGES.md | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/CHANGES.md b/CHANGES.md
+index 235d65c746..a0a9d2f064 100644
+--- a/CHANGES.md
++++ b/CHANGES.md
+@@ -8,14 +8,14 @@
+
+ ### Bugfixes
+
+-- Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363))
++- Fix a longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363))
+ - Pass leave from remote invite rejection down Sliding Sync. ([\#18375](https://github.com/element-hq/synapse/issues/18375))
+
+ ### Updates to the Docker image
+
+-- In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291))
++- In `configure_workers_and_start.py`, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291))
+ - Optimize the build of the workers image. ([\#18292](https://github.com/element-hq/synapse/issues/18292))
+-- In start_for_complement.sh, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293))
++- In `start_for_complement.sh`, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293))
+ - When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. ([\#18295](https://github.com/element-hq/synapse/issues/18295))
+
+ ### Improved Documentation
+@@ -28,7 +28,7 @@
+
+ ### Internal Changes
+
+-- Return specific error code when adding an email address / phone number to account is not supported (MSC4178). ([\#17578](https://github.com/element-hq/synapse/issues/17578))
++- Return specific error code when adding an email address / phone number to account is not supported ([MSC4178](https://github.com/matrix-org/matrix-spec-proposals/pull/4178)). ([\#17578](https://github.com/element-hq/synapse/issues/17578))
+ - Stop auto-provisioning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. ([\#18181](https://github.com/element-hq/synapse/issues/18181))
+ - Apply file hashing and existing quarantines to media downloaded for URL previews. ([\#18297](https://github.com/element-hq/synapse/issues/18297))
+ - Allow a few admin APIs used by matrix-authentication-service to run on workers. ([\#18313](https://github.com/element-hq/synapse/issues/18313))
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0045-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch b/packages/overlays/matrix-synapse/patches/0045-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch
new file mode 100644
index 0000000..b448370
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0045-Bump-mypy-zope-from-1.0.9-to-1.0.11-18428.patch
@@ -0,0 +1,39 @@
+From c626d54cea3a99200c162a2578550e56242e8213 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 13 May 2025 15:12:22 +0100
+Subject: [PATCH 45/74] Bump mypy-zope from 1.0.9 to 1.0.11 (#18428)
+
+---
+ poetry.lock | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 1935df638a..b6e32f43b8 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -1511,18 +1511,18 @@ files = [
+
+ [[package]]
+ name = "mypy-zope"
+-version = "1.0.9"
++version = "1.0.11"
+ description = "Plugin for mypy to support zope interfaces"
+ optional = false
+ python-versions = "*"
+ groups = ["dev"]
+ files = [
+- {file = "mypy_zope-1.0.9-py3-none-any.whl", hash = "sha256:6666c1556891a3cb186137519dbd7a58cb30fb72b2504798cad47b35391921ba"},
+- {file = "mypy_zope-1.0.9.tar.gz", hash = "sha256:37d6985dfb05a4c27b35cff47577fd5bad878db4893ddedf54d165f7389a1cdb"},
++ {file = "mypy_zope-1.0.11-py3-none-any.whl", hash = "sha256:4395d716b43ab89916edf6d0b5761655b4d4a43b2692fce806bbd733829977ee"},
++ {file = "mypy_zope-1.0.11.tar.gz", hash = "sha256:1c95e49e9dcdf070a0858f067dac55e8e4e47519fdc15dfdab9b7eee273a0e01"},
+ ]
+
+ [package.dependencies]
+-mypy = ">=1.0.0,<1.14.0"
++mypy = ">=1.0.0,<1.16.0"
+ "zope.interface" = "*"
+ "zope.schema" = "*"
+
+--
+2.49.0
+
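`mypy-zope` is the plugin that teaches mypy about `zope.interface`, which Synapse uses pervasively via Twisted; the bump above also widens the supported mypy range to `<1.16.0`. As a rough illustration of the kind of code the plugin makes checkable (not code from this repo):

    # Illustrative zope.interface usage. Without the mypy-zope plugin,
    # mypy cannot check that the @implementer class conforms to the
    # interface; with it, a wrong signature on greet() is an error.
    from zope.interface import Interface, implementer

    class IGreeter(Interface):
        def greet(name: str) -> str:
            """Return a greeting for `name`."""

    @implementer(IGreeter)
    class Greeter:
        def greet(self, name: str) -> str:
            return f"hello, {name}"

    print(Greeter().greet("synapse"))
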
diff --git a/packages/overlays/matrix-synapse/patches/0046-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch b/packages/overlays/matrix-synapse/patches/0046-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch
new file mode 100644
index 0000000..744bae9
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0046-Bump-types-requests-from-2.32.0.20241016-to-2.32.0.2.patch
@@ -0,0 +1,36 @@
+From ba2f1be891a4dbc2fe55af968dd72a146a8c9068 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 13 May 2025 15:12:34 +0100
+Subject: [PATCH 46/74] Bump types-requests from 2.32.0.20241016 to
+ 2.32.0.20250328 (#18427)
+
+---
+ poetry.lock | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index b6e32f43b8..7d7868d37f 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -3059,14 +3059,14 @@ files = [
+
+ [[package]]
+ name = "types-requests"
+-version = "2.32.0.20241016"
++version = "2.32.0.20250328"
+ description = "Typing stubs for requests"
+ optional = false
+-python-versions = ">=3.8"
++python-versions = ">=3.9"
+ groups = ["dev"]
+ files = [
+- {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},
+- {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"},
++ {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
++ {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
+ ]
+
+ [package.dependencies]
+--
+2.49.0
+
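`types-requests` is a stubs-only package: it affects type checking, not runtime (note the bump above also raises its floor to Python 3.9). With the stubs in the dev environment, mypy can check calls like the sketch below end to end; it reuses the `/_matrix/federation/v1/version` endpoint mentioned in the changelog, with the response shape per the Matrix server-server spec:

    # Illustrative, not from this repo: the stubs let mypy type-check this
    # fully; at runtime they are inert.
    import requests

    def federation_version(base_url: str) -> str:
        resp = requests.get(f"{base_url}/_matrix/federation/v1/version", timeout=5)
        resp.raise_for_status()
        # Spec response shape: {"server": {"name": ..., "version": ...}}
        return resp.json()["server"]["version"]

    print(federation_version("https://matrix.example.org"))
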
diff --git a/packages/overlays/matrix-synapse/patches/0047-Remove-newline-from-final-bullet-point-of-PR-templat.patch b/packages/overlays/matrix-synapse/patches/0047-Remove-newline-from-final-bullet-point-of-PR-templat.patch
new file mode 100644
index 0000000..7163a97
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0047-Remove-newline-from-final-bullet-point-of-PR-templat.patch
@@ -0,0 +1,34 @@
+From 480d4faa38401f37b0b5608356ee1959aa5829c8 Mon Sep 17 00:00:00 2001
+From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+Date: Tue, 13 May 2025 15:14:00 +0100
+Subject: [PATCH 47/74] Remove newline from final bullet point of PR template
+ (#18419)
+
+---
+ .github/PULL_REQUEST_TEMPLATE.md | 3 +--
+ changelog.d/18419.misc | 1 +
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+ create mode 100644 changelog.d/18419.misc
+
+diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
+index 07d4f6dfce..f8e60815fa 100644
+--- a/.github/PULL_REQUEST_TEMPLATE.md
++++ b/.github/PULL_REQUEST_TEMPLATE.md
+@@ -9,5 +9,4 @@
+ - End with either a period (.) or an exclamation mark (!).
+ - Start with a capital letter.
+ - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
+-* [ ] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct
+- (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
++* [ ] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
+diff --git a/changelog.d/18419.misc b/changelog.d/18419.misc
+new file mode 100644
+index 0000000000..0ff36e27b8
+--- /dev/null
++++ b/changelog.d/18419.misc
+@@ -0,0 +1 @@
++Update the PR review template to remove an erroneous line break from the final bullet point.
+\ No newline at end of file
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0048-Explicitly-enable-pypy-for-cibuildwheel-18417.patch b/packages/overlays/matrix-synapse/patches/0048-Explicitly-enable-pypy-for-cibuildwheel-18417.patch
new file mode 100644
index 0000000..bd1678c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0048-Explicitly-enable-pypy-for-cibuildwheel-18417.patch
@@ -0,0 +1,262 @@
+From 2db54c88ff54a5377d96088c23ac1f4dfef8faf3 Mon Sep 17 00:00:00 2001
+From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+Date: Tue, 13 May 2025 15:19:30 +0100
+Subject: [PATCH 48/74] Explicitly enable pypy for cibuildwheel (#18417)
+
+---
+ changelog.d/18417.misc | 1 +
+ poetry.lock | 49 +++++++++++++++++++++---------------------
+ pyproject.toml | 3 +++
+ 3 files changed, 28 insertions(+), 25 deletions(-)
+ create mode 100644 changelog.d/18417.misc
+
+diff --git a/changelog.d/18417.misc b/changelog.d/18417.misc
+new file mode 100644
+index 0000000000..5f650a202a
+--- /dev/null
++++ b/changelog.d/18417.misc
+@@ -0,0 +1 @@
++Explicitly enable PyPy builds in `cibuildwheel`'s config to avoid it being disabled on a future upgrade to `cibuildwheel` v3.
+\ No newline at end of file
+diff --git a/poetry.lock b/poetry.lock
+index 7d7868d37f..7190d0f788 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -1,4 +1,4 @@
+-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
++# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
+
+ [[package]]
+ name = "annotated-types"
+@@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
+ optional = true
+ python-versions = ">=3.9"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
++markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
+ files = [
+ {file = "authlib-1.5.1-py2.py3-none-any.whl", hash = "sha256:8408861cbd9b4ea2ff759b00b6f02fd7d81ac5a56d0b2b22c08606c6049aae11"},
+ {file = "authlib-1.5.1.tar.gz", hash = "sha256:5cbc85ecb0667312c1cdc2f9095680bb735883b123fb509fde1e65b1c5df972e"},
+@@ -451,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
+ optional = true
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"saml2\""
++markers = "extra == \"saml2\" or extra == \"all\""
+ files = [
+ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
+ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
+@@ -494,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"saml2\""
++markers = "extra == \"saml2\" or extra == \"all\""
+ files = [
+ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
+ {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
+@@ -544,7 +544,7 @@ description = "Python wrapper for hiredis"
+ optional = true
+ python-versions = ">=3.8"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"redis\""
++markers = "extra == \"redis\" or extra == \"all\""
+ files = [
+ {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
+ {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},
+@@ -890,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"opentracing\""
++markers = "extra == \"opentracing\" or extra == \"all\""
+ files = [
+ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
+ ]
+@@ -1028,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
++markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
+ files = [
+ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
+ {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
+@@ -1044,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
+ optional = true
+ python-versions = ">=3.6"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"url-preview\""
++markers = "extra == \"url-preview\" or extra == \"all\""
+ files = [
+ {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
+ {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
+@@ -1330,7 +1330,7 @@ description = "An LDAP3 auth provider for Synapse"
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
++markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
+ files = [
+ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
+ {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
+@@ -1551,7 +1551,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"opentracing\""
++markers = "extra == \"opentracing\" or extra == \"all\""
+ files = [
+ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
+ ]
+@@ -1720,7 +1720,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
+ optional = true
+ python-versions = ">=3.8"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"postgres\""
++markers = "extra == \"postgres\" or extra == \"all\""
+ files = [
+ {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
+ {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
+@@ -1728,7 +1728,6 @@ files = [
+ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
+ {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
+ {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
+- {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
+ {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
+ {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
+ {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
+@@ -1741,7 +1740,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
++markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
+ files = [
+ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
+ ]
+@@ -1757,7 +1756,7 @@ description = "A Simple library to enable psycopg2 compatability"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
++markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
+ files = [
+ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
+ ]
+@@ -1980,7 +1979,7 @@ description = "Python extension wrapping the ICU C++ API"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"user-search\""
++markers = "extra == \"user-search\" or extra == \"all\""
+ files = [
+ {file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
+ ]
+@@ -2029,7 +2028,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
+ optional = true
+ python-versions = ">=3.6"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"cache-memory\""
++markers = "extra == \"cache-memory\" or extra == \"all\""
+ files = [
+ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
+ {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
+@@ -2089,7 +2088,7 @@ description = "Python implementation of SAML Version 2 Standard"
+ optional = true
+ python-versions = ">=3.9,<4.0"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"saml2\""
++markers = "extra == \"saml2\" or extra == \"all\""
+ files = [
+ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
+ {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
+@@ -2114,7 +2113,7 @@ description = "Extensions to the standard Python datetime module"
+ optional = true
+ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"saml2\""
++markers = "extra == \"saml2\" or extra == \"all\""
+ files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+@@ -2142,7 +2141,7 @@ description = "World timezone definitions, modern and historical"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"saml2\""
++markers = "extra == \"saml2\" or extra == \"all\""
+ files = [
+ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
+ {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
+@@ -2506,7 +2505,7 @@ description = "Python client for Sentry (https://sentry.io)"
+ optional = true
+ python-versions = ">=3.6"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"sentry\""
++markers = "extra == \"sentry\" or extra == \"all\""
+ files = [
+ {file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
+ {file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},
+@@ -2690,7 +2689,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"opentracing\""
++markers = "extra == \"opentracing\" or extra == \"all\""
+ files = [
+ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
+ {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
+@@ -2706,7 +2705,7 @@ description = "Python bindings for the Apache Thrift RPC system"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"opentracing\""
++markers = "extra == \"opentracing\" or extra == \"all\""
+ files = [
+ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
+ ]
+@@ -2768,7 +2767,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
+ optional = true
+ python-versions = ">=3.8"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"opentracing\""
++markers = "extra == \"opentracing\" or extra == \"all\""
+ files = [
+ {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
+ {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
+@@ -2902,7 +2901,7 @@ description = "non-blocking redis client for python"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"redis\""
++markers = "extra == \"redis\" or extra == \"all\""
+ files = [
+ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
+ {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
+@@ -3245,7 +3244,7 @@ description = "An XML Schema validator and decoder"
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"all\" or extra == \"saml2\""
++markers = "extra == \"saml2\" or extra == \"all\""
+ files = [
+ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
+ {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
+diff --git a/pyproject.toml b/pyproject.toml
+index 5f80d28344..914a5804aa 100644
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -385,6 +385,9 @@ build-backend = "poetry.core.masonry.api"
+ # - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
+ # c.f. https://github.com/matrix-org/synapse/pull/14259
+ skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
++# Enable non-default builds.
++# "pypy" used to be included by default up until cibuildwheel 3.
++enable = "pypy"
+
+ # We need a rust compiler.
+ #
+--
+2.49.0
+
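Most of the poetry.lock churn in this patch is Poetry 2.1.3 reordering boolean environment markers, e.g. `extra == "all" or extra == "oidc"` becoming `extra == "oidc" or extra == "all"`. The two forms are logically identical, which the `packaging` library can confirm:

    # Sanity check (illustrative): the reordered markers evaluate
    # identically for every value of `extra`.
    from packaging.markers import Marker

    old = Marker('extra == "all" or extra == "oidc"')
    new = Marker('extra == "oidc" or extra == "all"')

    for extra in ("all", "oidc", "saml2", ""):
        env = {"extra": extra}
        assert old.evaluate(env) == new.evaluate(env)
    print("marker reordering is behaviour-preserving")
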
diff --git a/packages/overlays/matrix-synapse/patches/0049-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch b/packages/overlays/matrix-synapse/patches/0049-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch
new file mode 100644
index 0000000..eb5a706
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0049-Fix-a-couple-type-annotations-in-the-RootConfig-Conf.patch
@@ -0,0 +1,360 @@
+From 6e910e2b2c5cef393473dcc6bf957a8671a1186e Mon Sep 17 00:00:00 2001
+From: Eric Eastwood <erice@element.io>
+Date: Tue, 13 May 2025 10:22:15 -0500
+Subject: [PATCH 49/74] Fix a couple type annotations in the
+ `RootConfig`/`Config` (#18409)
+
+Fix a couple type annotations in the `RootConfig`/`Config`. Discovered
+while cribbing this code for another project.
+
+It really sucks that `mypy` type checking doesn't catch this. I assume
+this is because we also have a `synapse/config/_base.pyi` that overrides
+all of this. Still unclear to me why the `Iterable[str]` vs
+`StrSequence` issue wasn't caught as that's what `ConfigError` expects.
+---
+ changelog.d/18409.misc | 1 +
+ synapse/config/_base.py | 6 +++---
+ synapse/config/_base.pyi | 4 ++--
+ synapse/config/experimental.py | 6 +++---
+ synapse/config/key.py | 6 ++++--
+ synapse/config/workers.py | 2 +-
+ tests/config/test_api.py | 3 ++-
+ tests/config/test_appservice.py | 7 ++++---
+ tests/config/test_cache.py | 3 ++-
+ tests/config/test_database.py | 5 ++++-
+ tests/config/test_room_directory.py | 5 +++--
+ tests/config/test_server.py | 10 +++++-----
+ tests/events/test_auto_accept_invites.py | 5 +++--
+ 13 files changed, 37 insertions(+), 26 deletions(-)
+ create mode 100644 changelog.d/18409.misc
+
+diff --git a/changelog.d/18409.misc b/changelog.d/18409.misc
+new file mode 100644
+index 0000000000..bbb9bdbb1b
+--- /dev/null
++++ b/changelog.d/18409.misc
+@@ -0,0 +1 @@
++Fix a couple type annotations in the `RootConfig`/`Config`.
+diff --git a/synapse/config/_base.py b/synapse/config/_base.py
+index 132ba26af9..d367d45fea 100644
+--- a/synapse/config/_base.py
++++ b/synapse/config/_base.py
+@@ -170,7 +170,7 @@ class Config:
+
+ section: ClassVar[str]
+
+- def __init__(self, root_config: "RootConfig" = None):
++ def __init__(self, root_config: "RootConfig"):
+ self.root = root_config
+
+ # Get the path to the default Synapse template directory
+@@ -445,7 +445,7 @@ class RootConfig:
+ return res
+
+ @classmethod
+- def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: any) -> None:
++ def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: Any) -> None:
+ """
+ Invoke a static function on config objects this RootConfig is
+ configured to use.
+@@ -1047,7 +1047,7 @@ class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
+ return self._get_instance(key)
+
+
+-def read_file(file_path: Any, config_path: Iterable[str]) -> str:
++def read_file(file_path: Any, config_path: StrSequence) -> str:
+ """Check the given file exists, and read it into a string
+
+ If it does not, emit an error indicating the problem
+diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
+index 55b0e2cbf4..9c4ec8f713 100644
+--- a/synapse/config/_base.pyi
++++ b/synapse/config/_base.pyi
+@@ -179,7 +179,7 @@ class RootConfig:
+ class Config:
+ root: RootConfig
+ default_template_dir: str
+- def __init__(self, root_config: Optional[RootConfig] = ...) -> None: ...
++ def __init__(self, root_config: RootConfig = ...) -> None: ...
+ @staticmethod
+ def parse_size(value: Union[str, int]) -> int: ...
+ @staticmethod
+@@ -212,4 +212,4 @@ class ShardedWorkerHandlingConfig:
+ class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
+ def get_instance(self, key: str) -> str: ... # noqa: F811
+
+-def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...
++def read_file(file_path: Any, config_path: StrSequence) -> str: ...
+diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
+index 0a963b121a..1226eaa58a 100644
+--- a/synapse/config/experimental.py
++++ b/synapse/config/experimental.py
+@@ -21,7 +21,7 @@
+
+ import enum
+ from functools import cache
+-from typing import TYPE_CHECKING, Any, Iterable, Optional
++from typing import TYPE_CHECKING, Any, Optional
+
+ import attr
+ import attr.validators
+@@ -29,7 +29,7 @@ import attr.validators
+ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
+ from synapse.config import ConfigError
+ from synapse.config._base import Config, RootConfig, read_file
+-from synapse.types import JsonDict
++from synapse.types import JsonDict, StrSequence
+
+ # Determine whether authlib is installed.
+ try:
+@@ -45,7 +45,7 @@ if TYPE_CHECKING:
+
+
+ @cache
+-def read_secret_from_file_once(file_path: Any, config_path: Iterable[str]) -> str:
++def read_secret_from_file_once(file_path: Any, config_path: StrSequence) -> str:
+ """Returns the memoized secret read from file."""
+ return read_file(file_path, config_path).strip()
+
+diff --git a/synapse/config/key.py b/synapse/config/key.py
+index 337f98dbc1..29c558448b 100644
+--- a/synapse/config/key.py
++++ b/synapse/config/key.py
+@@ -191,7 +191,7 @@ class KeyConfig(Config):
+ if macaroon_secret_key:
+ raise ConfigError(CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR)
+ macaroon_secret_key = read_file(
+- macaroon_secret_key_path, "macaroon_secret_key_path"
++ macaroon_secret_key_path, ("macaroon_secret_key_path",)
+ ).strip()
+ if not macaroon_secret_key:
+ macaroon_secret_key = self.root.registration.registration_shared_secret
+@@ -216,7 +216,9 @@ class KeyConfig(Config):
+ if form_secret_path:
+ if form_secret:
+ raise ConfigError(CONFLICTING_FORM_SECRET_OPTS_ERROR)
+- self.form_secret = read_file(form_secret_path, "form_secret_path").strip()
++ self.form_secret = read_file(
++ form_secret_path, ("form_secret_path",)
++ ).strip()
+ else:
+ self.form_secret = form_secret
+
+diff --git a/synapse/config/workers.py b/synapse/config/workers.py
+index 5af50ee952..2dfeb47c2e 100644
+--- a/synapse/config/workers.py
++++ b/synapse/config/workers.py
+@@ -263,7 +263,7 @@ class WorkerConfig(Config):
+ if worker_replication_secret:
+ raise ConfigError(CONFLICTING_WORKER_REPLICATION_SECRET_OPTS_ERROR)
+ self.worker_replication_secret = read_file(
+- worker_replication_secret_path, "worker_replication_secret_path"
++ worker_replication_secret_path, ("worker_replication_secret_path",)
+ ).strip()
+ else:
+ self.worker_replication_secret = worker_replication_secret
+diff --git a/tests/config/test_api.py b/tests/config/test_api.py
+index 6773c9a277..e6cc3e21ed 100644
+--- a/tests/config/test_api.py
++++ b/tests/config/test_api.py
+@@ -3,6 +3,7 @@ from unittest import TestCase as StdlibTestCase
+ import yaml
+
+ from synapse.config import ConfigError
++from synapse.config._base import RootConfig
+ from synapse.config.api import ApiConfig
+ from synapse.types.state import StateFilter
+
+@@ -19,7 +20,7 @@ DEFAULT_PREJOIN_STATE_PAIRS = {
+
+ class TestRoomPrejoinState(StdlibTestCase):
+ def read_config(self, source: str) -> ApiConfig:
+- config = ApiConfig()
++ config = ApiConfig(RootConfig())
+ config.read_config(yaml.safe_load(source))
+ return config
+
+diff --git a/tests/config/test_appservice.py b/tests/config/test_appservice.py
+index e3021b59d8..2572681224 100644
+--- a/tests/config/test_appservice.py
++++ b/tests/config/test_appservice.py
+@@ -19,6 +19,7 @@
+ #
+ #
+
++from synapse.config._base import RootConfig
+ from synapse.config.appservice import AppServiceConfig, ConfigError
+
+ from tests.unittest import TestCase
+@@ -36,12 +37,12 @@ class AppServiceConfigTest(TestCase):
+ ["foo", "bar", False],
+ ]:
+ with self.assertRaises(ConfigError):
+- AppServiceConfig().read_config(
++ AppServiceConfig(RootConfig()).read_config(
+ {"app_service_config_files": invalid_value}
+ )
+
+ def test_valid_app_service_config_files(self) -> None:
+- AppServiceConfig().read_config({"app_service_config_files": []})
+- AppServiceConfig().read_config(
++ AppServiceConfig(RootConfig()).read_config({"app_service_config_files": []})
++ AppServiceConfig(RootConfig()).read_config(
+ {"app_service_config_files": ["/not/a/real/path", "/not/a/real/path/2"]}
+ )
+diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py
+index 631263b5ca..aead73e059 100644
+--- a/tests/config/test_cache.py
++++ b/tests/config/test_cache.py
+@@ -19,6 +19,7 @@
+ #
+ #
+
++from synapse.config._base import RootConfig
+ from synapse.config.cache import CacheConfig, add_resizable_cache
+ from synapse.types import JsonDict
+ from synapse.util.caches.lrucache import LruCache
+@@ -29,7 +30,7 @@ from tests.unittest import TestCase
+ class CacheConfigTests(TestCase):
+ def setUp(self) -> None:
+ # Reset caches before each test since there's global state involved.
+- self.config = CacheConfig()
++ self.config = CacheConfig(RootConfig())
+ self.config.reset()
+
+ def tearDown(self) -> None:
+diff --git a/tests/config/test_database.py b/tests/config/test_database.py
+index b46519f84a..3fa5fff2b2 100644
+--- a/tests/config/test_database.py
++++ b/tests/config/test_database.py
+@@ -20,6 +20,7 @@
+
+ import yaml
+
++from synapse.config._base import RootConfig
+ from synapse.config.database import DatabaseConfig
+
+ from tests import unittest
+@@ -28,7 +29,9 @@ from tests import unittest
+ class DatabaseConfigTestCase(unittest.TestCase):
+ def test_database_configured_correctly(self) -> None:
+ conf = yaml.safe_load(
+- DatabaseConfig().generate_config_section(data_dir_path="/data_dir_path")
++ DatabaseConfig(RootConfig()).generate_config_section(
++ data_dir_path="/data_dir_path"
++ )
+ )
+
+ expected_database_conf = {
+diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
+index e25f7787f4..5208381279 100644
+--- a/tests/config/test_room_directory.py
++++ b/tests/config/test_room_directory.py
+@@ -24,6 +24,7 @@ from twisted.test.proto_helpers import MemoryReactor
+ import synapse.rest.admin
+ import synapse.rest.client.login
+ import synapse.rest.client.room
++from synapse.config._base import RootConfig
+ from synapse.config.room_directory import RoomDirectoryConfig
+ from synapse.server import HomeServer
+ from synapse.util import Clock
+@@ -63,7 +64,7 @@ class RoomDirectoryConfigTestCase(unittest.HomeserverTestCase):
+ """
+ )
+
+- rd_config = RoomDirectoryConfig()
++ rd_config = RoomDirectoryConfig(RootConfig())
+ rd_config.read_config(config)
+
+ self.assertFalse(
+@@ -123,7 +124,7 @@ class RoomDirectoryConfigTestCase(unittest.HomeserverTestCase):
+ """
+ )
+
+- rd_config = RoomDirectoryConfig()
++ rd_config = RoomDirectoryConfig(RootConfig())
+ rd_config.read_config(config)
+
+ self.assertFalse(
+diff --git a/tests/config/test_server.py b/tests/config/test_server.py
+index 74073cfdc5..5eb2540439 100644
+--- a/tests/config/test_server.py
++++ b/tests/config/test_server.py
+@@ -20,7 +20,7 @@
+
+ import yaml
+
+-from synapse.config._base import ConfigError
++from synapse.config._base import ConfigError, RootConfig
+ from synapse.config.server import ServerConfig, generate_ip_set, is_threepid_reserved
+
+ from tests import unittest
+@@ -40,7 +40,7 @@ class ServerConfigTestCase(unittest.TestCase):
+
+ def test_unsecure_listener_no_listeners_open_private_ports_false(self) -> None:
+ conf = yaml.safe_load(
+- ServerConfig().generate_config_section(
++ ServerConfig(RootConfig()).generate_config_section(
+ "CONFDIR", "/data_dir_path", "che.org", False, None
+ )
+ )
+@@ -60,7 +60,7 @@ class ServerConfigTestCase(unittest.TestCase):
+
+ def test_unsecure_listener_no_listeners_open_private_ports_true(self) -> None:
+ conf = yaml.safe_load(
+- ServerConfig().generate_config_section(
++ ServerConfig(RootConfig()).generate_config_section(
+ "CONFDIR", "/data_dir_path", "che.org", True, None
+ )
+ )
+@@ -94,7 +94,7 @@ class ServerConfigTestCase(unittest.TestCase):
+ ]
+
+ conf = yaml.safe_load(
+- ServerConfig().generate_config_section(
++ ServerConfig(RootConfig()).generate_config_section(
+ "CONFDIR", "/data_dir_path", "this.one.listens", True, listeners
+ )
+ )
+@@ -128,7 +128,7 @@ class ServerConfigTestCase(unittest.TestCase):
+ expected_listeners[1]["bind_addresses"] = ["::1", "127.0.0.1"]
+
+ conf = yaml.safe_load(
+- ServerConfig().generate_config_section(
++ ServerConfig(RootConfig()).generate_config_section(
+ "CONFDIR", "/data_dir_path", "this.one.listens", True, listeners
+ )
+ )
+diff --git a/tests/events/test_auto_accept_invites.py b/tests/events/test_auto_accept_invites.py
+index d4e87b1b7f..d2100e9903 100644
+--- a/tests/events/test_auto_accept_invites.py
++++ b/tests/events/test_auto_accept_invites.py
+@@ -31,6 +31,7 @@ from twisted.test.proto_helpers import MemoryReactor
+
+ from synapse.api.constants import EventTypes
+ from synapse.api.errors import SynapseError
++from synapse.config._base import RootConfig
+ from synapse.config.auto_accept_invites import AutoAcceptInvitesConfig
+ from synapse.events.auto_accept_invites import InviteAutoAccepter
+ from synapse.federation.federation_base import event_from_pdu_json
+@@ -690,7 +691,7 @@ class InviteAutoAccepterInternalTestCase(TestCase):
+ "only_from_local_users": True,
+ }
+ }
+- parsed_config = AutoAcceptInvitesConfig()
++ parsed_config = AutoAcceptInvitesConfig(RootConfig())
+ parsed_config.read_config(config)
+
+ self.assertTrue(parsed_config.enabled)
+@@ -830,7 +831,7 @@ def create_module(
+ if config_override is None:
+ config_override = {}
+
+- config = AutoAcceptInvitesConfig()
++ config = AutoAcceptInvitesConfig(RootConfig())
+ config.read_config(config_override)
+
+ return InviteAutoAccepter(config, module_api)
+--
+2.49.0
+
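Every config-section constructor in the test updates above is now handed a `RootConfig()`. A minimal sketch of that pattern, assuming a Synapse checkout at this patch level (direct section construction like this is normally test-only):

```python
# Sketch: config section classes are constructed against a RootConfig,
# mirroring the updated tests above. Illustrative, not normal usage.
import yaml

from synapse.config._base import RootConfig
from synapse.config.server import ServerConfig

# The section keeps a reference to its root; the tests pass a fresh, empty one.
section = ServerConfig(RootConfig())
conf = yaml.safe_load(
    section.generate_config_section(
        "CONFDIR", "/data_dir_path", "example.com", False, None
    )
)
print(sorted(conf))  # top-level keys of the generated server section
```
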
diff --git a/packages/overlays/matrix-synapse/patches/0050-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch b/packages/overlays/matrix-synapse/patches/0050-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch
new file mode 100644
index 0000000..2530521
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0050-Explain-why-we-flush_buffer-for-Python-print-.-outpu.patch
@@ -0,0 +1,75 @@
+From a3bbd7eeabee7c6b229e95e0e04af5b430ea32db Mon Sep 17 00:00:00 2001
+From: Eric Eastwood <erice@element.io>
+Date: Tue, 13 May 2025 10:40:49 -0500
+Subject: [PATCH 50/74] Explain why we `flush_buffer()` for Python `print(...)`
+ output (#18420)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Prompted by using this code elsewhere and not knowing why it was there.
+
+Based on this article and @reivilibre's experience mentioning
+`PYTHONUNBUFFERED=1`,
+
+> #### programming languages where the default “print” statement buffers
+>
+> Also, here are a few programming language where the default print
+statement will buffer output when writing to a pipe, and some ways to
+disable buffering if you want:
+>
+> - Python (disable with `python -u`, or `PYTHONUNBUFFERED=1`, or
+`sys.stdout.reconfigure(line_buffering=False)`, or `print(x,
+flush=True)`)
+>
+> _--
+https://jvns.ca/blog/2024/11/29/why-pipes-get-stuck-buffering/#programming-languages-where-the-default-print-statement-buffers_
+---
+ changelog.d/18420.misc | 1 +
+ docker/configure_workers_and_start.py | 5 +++++
+ docker/start.py | 5 +++++
+ 3 files changed, 11 insertions(+)
+ create mode 100644 changelog.d/18420.misc
+
+diff --git a/changelog.d/18420.misc b/changelog.d/18420.misc
+new file mode 100644
+index 0000000000..d52175af91
+--- /dev/null
++++ b/changelog.d/18420.misc
+@@ -0,0 +1 @@
++Explain why we `flush_buffer()` for Python `print(...)` output.
+diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
+index df34d51f77..102a88fad1 100755
+--- a/docker/configure_workers_and_start.py
++++ b/docker/configure_workers_and_start.py
+@@ -352,6 +352,11 @@ def error(txt: str) -> NoReturn:
+
+
+ def flush_buffers() -> None:
++ """
++ Python's `print()` buffers output by default, typically waiting until ~8KB
++ accumulates. This method can be used to flush the buffers so we can see the output
++ of any print statements so far.
++ """
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+diff --git a/docker/start.py b/docker/start.py
+index 818a5355ca..0be9976a0c 100755
+--- a/docker/start.py
++++ b/docker/start.py
+@@ -22,6 +22,11 @@ def error(txt: str) -> NoReturn:
+
+
+ def flush_buffers() -> None:
++ """
++ Python's `print()` buffers output by default, typically waiting until ~8KB
++ accumulates. This method can be used to flush the buffers so we can see the output
++ of any print statements so far.
++ """
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+--
+2.49.0
+
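The buffering behaviour the docstring describes is easy to reproduce with a standalone script (hypothetical `demo.py`, not part of the patch); pipe its output, e.g. `python demo.py | cat`, to see the difference:

```python
# demo.py -- when stdout is a pipe, print() output is block-buffered and may
# not appear until ~8KB accumulates or the process exits.
import sys
import time

print("buffered: may show up late when piped")
sys.stdout.flush()  # the same calls flush_buffers() makes
sys.stderr.flush()

print("flushed immediately", flush=True)  # per-call alternative
time.sleep(5)  # without the flushes, piped output could lag until exit
```

As the quoted article notes, `python -u` or `PYTHONUNBUFFERED=1` disables the buffering wholesale instead of flushing at chosen points.
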
diff --git a/packages/overlays/matrix-synapse/patches/0051-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch b/packages/overlays/matrix-synapse/patches/0051-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch
new file mode 100644
index 0000000..c542c7c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0051-Fix-room_list_publication_rules-docs-for-v1.126.0-18.patch
@@ -0,0 +1,64 @@
+From 194b923a6e625af6ca90bbbdc1f8a85a9215797e Mon Sep 17 00:00:00 2001
+From: Kim Brose <2803622+HarHarLinks@users.noreply.github.com>
+Date: Wed, 14 May 2025 10:36:54 +0000
+Subject: [PATCH 51/74] Fix room_list_publication_rules docs for v1.126.0
+ (#18286)
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18286.doc | 1 +
+ docs/usage/configuration/config_documentation.md | 15 ++++++++-------
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+ create mode 100644 changelog.d/18286.doc
+
+diff --git a/changelog.d/18286.doc b/changelog.d/18286.doc
+new file mode 100644
+index 0000000000..37728351c5
+--- /dev/null
++++ b/changelog.d/18286.doc
+@@ -0,0 +1 @@
++Update `room_list_publication_rules` docs to consider defaults that changed in v1.126.0. Contributed by @HarHarLinks.
+diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
+index 5351bef83a..2228c18a6c 100644
+--- a/docs/usage/configuration/config_documentation.md
++++ b/docs/usage/configuration/config_documentation.md
+@@ -4331,28 +4331,29 @@ room list by default_
+ Example configuration:
+
+ ```yaml
+-# No rule list specified. Anyone may publish any room to the public list.
++# No rule list specified. No one may publish any room to the public list, except server admins.
+ # This is the default behaviour.
+ room_list_publication_rules:
+ ```
+
+ ```yaml
+-# A list of one rule which allows everything.
++# A list of one rule which denies everything.
+ # This has the same effect as the previous example.
+ room_list_publication_rules:
+- - "action": "allow"
++ - "action": "deny"
+ ```
+
+ ```yaml
+-# An empty list of rules. No-one may publish to the room list.
++# An empty list of rules.
++# This has the same effect as the previous example.
+ room_list_publication_rules: []
+ ```
+
+ ```yaml
+-# A list of one rule which denies everything.
+-# This has the same effect as the previous example.
++# A list of one rule which allows everything.
++# This was the default behaviour pre v1.126.0.
+ room_list_publication_rules:
+- - "action": "deny"
++ - "action": "allow"
+ ```
+
+ ```yaml
+--
+2.49.0
+
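To make the rule semantics above concrete, here is a toy first-match evaluator. This is illustrative only, not Synapse's implementation; real rules also glob-match `user_id`, `room_id` and room aliases:

```python
from typing import List, Optional

def rule_matches(rule: dict) -> bool:
    # Real rules glob-match user_id/room_id/alias; this sketch matches all.
    return True

def may_publish(rules: Optional[List[dict]]) -> bool:
    if rules is None:
        # Since v1.126.0, an absent rule list behaves like a single deny rule.
        rules = [{"action": "deny"}]
    for rule in rules:
        if rule_matches(rule):
            return rule["action"] == "allow"  # first matching rule wins
    return False  # nothing matched (e.g. the empty list): deny

assert may_publish(None) is False                  # current default
assert may_publish([]) is False                    # empty rule list
assert may_publish([{"action": "allow"}]) is True  # pre-v1.126.0 default
```
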
diff --git a/packages/overlays/matrix-synapse/patches/0052-Add-option-to-allow-registrations-that-begin-with-_-.patch b/packages/overlays/matrix-synapse/patches/0052-Add-option-to-allow-registrations-that-begin-with-_-.patch
new file mode 100644
index 0000000..97ef21c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0052-Add-option-to-allow-registrations-that-begin-with-_-.patch
@@ -0,0 +1,116 @@
+From 44ae5362fd952dbb209f4b52ee9c96641163f032 Mon Sep 17 00:00:00 2001
+From: _ <x5f@fastmail.com>
+Date: Thu, 15 May 2025 04:31:52 -0700
+Subject: [PATCH 52/74] Add option to allow registrations that begin with '_'
+ (#18262)
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18262.feature | 1 +
+ .../configuration/config_documentation.md | 14 +++++++++++
+ synapse/config/registration.py | 4 ++++
+ synapse/handlers/register.py | 5 +++-
+ tests/handlers/test_register.py | 23 +++++++++++++++++++
+ 5 files changed, 46 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18262.feature
+
+diff --git a/changelog.d/18262.feature b/changelog.d/18262.feature
+new file mode 100644
+index 0000000000..c8249faa76
+--- /dev/null
++++ b/changelog.d/18262.feature
+@@ -0,0 +1 @@
++Add option to allow registrations that begin with `_`. Contributed by `_` (@hex5f).
+diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
+index 2228c18a6c..e688bc5cd8 100644
+--- a/docs/usage/configuration/config_documentation.md
++++ b/docs/usage/configuration/config_documentation.md
+@@ -2887,6 +2887,20 @@ Example configuration:
+ inhibit_user_in_use_error: true
+ ```
+ ---
++### `allow_underscore_prefixed_localpart`
++
++Whether users are allowed to register with an underscore-prefixed localpart.
++By default, application services use prefixes like `_example` to namespace
++their associated ghost users, so enabling this may result in clashes or
++confusion. Useful when provisioning users from an external identity provider.
++
++Defaults to false.
++
++Example configuration:
++```yaml
++allow_underscore_prefixed_localpart: false
++```
++---
+ ## User session management
+ ---
+ ### `session_lifetime`
+diff --git a/synapse/config/registration.py b/synapse/config/registration.py
+index 3cf7031656..8adf21079e 100644
+--- a/synapse/config/registration.py
++++ b/synapse/config/registration.py
+@@ -162,6 +162,10 @@ class RegistrationConfig(Config):
+ "disable_msisdn_registration", False
+ )
+
++ self.allow_underscore_prefixed_localpart = config.get(
++ "allow_underscore_prefixed_localpart", False
++ )
++
+ session_lifetime = config.get("session_lifetime")
+ if session_lifetime is not None:
+ session_lifetime = self.parse_duration(session_lifetime)
+diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
+index ecfea175c7..3e86349981 100644
+--- a/synapse/handlers/register.py
++++ b/synapse/handlers/register.py
+@@ -159,7 +159,10 @@ class RegistrationHandler:
+ if not localpart:
+ raise SynapseError(400, "User ID cannot be empty", Codes.INVALID_USERNAME)
+
+- if localpart[0] == "_":
++ if (
++ localpart[0] == "_"
++ and not self.hs.config.registration.allow_underscore_prefixed_localpart
++ ):
+ raise SynapseError(
+ 400, "User ID may not begin with _", Codes.INVALID_USERNAME
+ )
+diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
+index 92487692db..dda389c08b 100644
+--- a/tests/handlers/test_register.py
++++ b/tests/handlers/test_register.py
+@@ -588,6 +588,29 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
+ d = self.store.is_support_user(user_id)
+ self.assertFalse(self.get_success(d))
+
++ def test_underscore_localpart_rejected_by_default(self) -> None:
++ for invalid_user_id in ("_", "_prefixed"):
++ with self.subTest(invalid_user_id=invalid_user_id):
++ self.get_failure(
++ self.handler.register_user(localpart=invalid_user_id),
++ SynapseError,
++ )
++
++ @override_config(
++ {
++ "allow_underscore_prefixed_localpart": True,
++ }
++ )
++ def test_underscore_localpart_allowed_if_configured(self) -> None:
++ for valid_user_id in ("_", "_prefixed"):
++ with self.subTest(valid_user_id=valid_user_id):
++ user_id = self.get_success(
++ self.handler.register_user(
++ localpart=valid_user_id,
++ ),
++ )
++ self.assertEqual(user_id, f"@{valid_user_id}:test")
++
+ def test_invalid_user_id(self) -> None:
+ invalid_user_id = "^abcd"
+ self.get_failure(
+--
+2.49.0
+
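The new check boils down to a single conditional; a standalone sketch of it follows (the names `check_localpart` and `allow_underscore_prefixed` are illustrative, not Synapse's API):

```python
def check_localpart(localpart: str, allow_underscore_prefixed: bool) -> None:
    if not localpart:
        raise ValueError("User ID cannot be empty")
    if localpart.startswith("_") and not allow_underscore_prefixed:
        # Application services conventionally namespace their ghost users
        # under a "_" prefix, so plain registration stays out by default.
        raise ValueError("User ID may not begin with _")

check_localpart("alice", allow_underscore_prefixed=False)  # accepted
check_localpart("_bot", allow_underscore_prefixed=True)    # accepted when opted in
```
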
diff --git a/packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch b/packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch
new file mode 100644
index 0000000..ee1cfc7
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0053-Move-index-creation-to-background-update-18439.patch
@@ -0,0 +1,63 @@
+From f5ed52c1e24b5649d7d81dd9690bb606e387961b Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Thu, 15 May 2025 12:43:24 +0100
+Subject: [PATCH 53/74] Move index creation to background update (#18439)
+
+Follows on from #18375. This prevents blocking startup on creating the
+index, which can take a while.
+
+---------
+
+Co-authored-by: Devon Hudson <devon.dmytro@gmail.com>
+---
+ changelog.d/18439.bugfix | 1 +
+ synapse/storage/databases/main/sliding_sync.py | 8 ++++++++
+ ...snapshot_idx.sql => 04_ss_membership_snapshot_idx.sql} | 4 ++--
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+ create mode 100644 changelog.d/18439.bugfix
+ rename synapse/storage/schema/main/delta/92/{03_ss_membership_snapshot_idx.sql => 04_ss_membership_snapshot_idx.sql} (73%)
+
+diff --git a/changelog.d/18439.bugfix b/changelog.d/18439.bugfix
+new file mode 100644
+index 0000000000..5ee9bda474
+--- /dev/null
++++ b/changelog.d/18439.bugfix
+@@ -0,0 +1 @@
++Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1.
+diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py
+index a287fd2a3f..6a62b11d1e 100644
+--- a/synapse/storage/databases/main/sliding_sync.py
++++ b/synapse/storage/databases/main/sliding_sync.py
+@@ -68,6 +68,14 @@ class SlidingSyncStore(SQLBaseStore):
+ columns=("membership_event_id",),
+ )
+
++ self.db_pool.updates.register_background_index_update(
++ update_name="sliding_sync_membership_snapshots_user_id_stream_ordering",
++ index_name="sliding_sync_membership_snapshots_user_id_stream_ordering",
++ table="sliding_sync_membership_snapshots",
++ columns=("user_id", "event_stream_ordering"),
++ replaces_index="sliding_sync_membership_snapshots_user_id",
++ )
++
+ async def get_latest_bump_stamp_for_room(
+ self,
+ room_id: str,
+diff --git a/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql b/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql
+similarity index 73%
+rename from synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql
+rename to synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql
+index c694203f95..6f5b7cb06e 100644
+--- a/synapse/storage/schema/main/delta/92/03_ss_membership_snapshot_idx.sql
++++ b/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql
+@@ -12,5 +12,5 @@
+ -- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+ -- So we can fetch all rooms for a given user sorted by stream order
+-DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id;
+-CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id, event_stream_ordering);
++INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
++ (9204, 'sliding_sync_membership_snapshots_user_id_stream_ordering', '{}');
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0054-remove-room-without-listeners-from-Notifier.room_to_.patch b/packages/overlays/matrix-synapse/patches/0054-remove-room-without-listeners-from-Notifier.room_to_.patch
new file mode 100644
index 0000000..8c90d7b
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0054-remove-room-without-listeners-from-Notifier.room_to_.patch
@@ -0,0 +1,38 @@
+From 0afdc0fc7ffe2cb7a2fa6d47f22b685cbacc7223 Mon Sep 17 00:00:00 2001
+From: Stanislav Kazantsev <stas.kazancev54@gmail.com>
+Date: Thu, 15 May 2025 23:18:17 +0600
+Subject: [PATCH 54/74] remove room without listeners from
+ Notifier.room_to_user_streams (#18380)
+
+Co-authored-by: Andrew Morgan <andrew@amorgan.xyz>
+---
+ changelog.d/18380.misc | 1 +
+ synapse/notifier.py | 3 +++
+ 2 files changed, 4 insertions(+)
+ create mode 100644 changelog.d/18380.misc
+
+diff --git a/changelog.d/18380.misc b/changelog.d/18380.misc
+new file mode 100644
+index 0000000000..5d6017be26
+--- /dev/null
++++ b/changelog.d/18380.misc
+@@ -0,0 +1 @@
++Fix a memory leak in `_NotifierUserStream`.
+\ No newline at end of file
+diff --git a/synapse/notifier.py b/synapse/notifier.py
+index 1914d0c914..6190432b87 100644
+--- a/synapse/notifier.py
++++ b/synapse/notifier.py
+@@ -158,6 +158,9 @@ class _NotifierUserStream:
+ lst = notifier.room_to_user_streams.get(room, set())
+ lst.discard(self)
+
++ if not lst:
++ notifier.room_to_user_streams.pop(room, None)
++
+ notifier.user_to_user_stream.pop(self.user_id)
+
+ def count_listeners(self) -> int:
+--
+2.49.0
+
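The leak being fixed is the classic dict-of-sets pattern: `discard()` empties the per-room set, but the empty set itself stays in the dict forever. A toy model of the structure and the fix:

```python
# Toy model of Notifier.room_to_user_streams and the fix above.
room_to_user_streams: dict = {}

def add_stream(room: str, stream: str) -> None:
    room_to_user_streams.setdefault(room, set()).add(stream)

def remove_stream(room: str, stream: str) -> None:
    lst = room_to_user_streams.get(room, set())
    lst.discard(stream)
    if not lst:
        # The fix: drop the now-empty set, otherwise the dict grows with one
        # dead entry per room ever touched over the server's lifetime.
        room_to_user_streams.pop(room, None)

add_stream("!room:example.org", "s1")
remove_stream("!room:example.org", "s1")
assert "!room:example.org" not in room_to_user_streams
```
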
diff --git a/packages/overlays/matrix-synapse/patches/0055-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch b/packages/overlays/matrix-synapse/patches/0055-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch
new file mode 100644
index 0000000..be91744
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0055-Fix-admin-redaction-endpoint-not-redacting-encrypted.patch
@@ -0,0 +1,110 @@
+From 74e2f028bbcaeb2a572d03e66334f3c671bffae2 Mon Sep 17 00:00:00 2001
+From: Shay <hillerys@element.io>
+Date: Mon, 19 May 2025 01:48:46 -0700
+Subject: [PATCH 55/74] Fix admin redaction endpoint not redacting encrypted
+ messages (#18434)
+
+---
+ changelog.d/18434.bugfix | 1 +
+ synapse/handlers/admin.py | 2 +-
+ tests/rest/admin/test_user.py | 55 ++++++++++++++++++++++++++++++++++-
+ 3 files changed, 56 insertions(+), 2 deletions(-)
+ create mode 100644 changelog.d/18434.bugfix
+
+diff --git a/changelog.d/18434.bugfix b/changelog.d/18434.bugfix
+new file mode 100644
+index 0000000000..dd094c83e8
+--- /dev/null
++++ b/changelog.d/18434.bugfix
+@@ -0,0 +1 @@
++Fix admin redaction endpoint not redacting encrypted messages.
+\ No newline at end of file
+diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
+index f3e7790d43..971a74244f 100644
+--- a/synapse/handlers/admin.py
++++ b/synapse/handlers/admin.py
+@@ -445,7 +445,7 @@ class AdminHandler:
+ user_id,
+ room,
+ limit,
+- ["m.room.member", "m.room.message"],
++ ["m.room.member", "m.room.message", "m.room.encrypted"],
+ )
+ if not event_ids:
+ # nothing to redact in this room
+diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
+index a35a250975..874c29c935 100644
+--- a/tests/rest/admin/test_user.py
++++ b/tests/rest/admin/test_user.py
+@@ -36,7 +36,13 @@ from twisted.test.proto_helpers import MemoryReactor
+ from twisted.web.resource import Resource
+
+ import synapse.rest.admin
+-from synapse.api.constants import ApprovalNoticeMedium, EventTypes, LoginType, UserTypes
++from synapse.api.constants import (
++ ApprovalNoticeMedium,
++ EventContentFields,
++ EventTypes,
++ LoginType,
++ UserTypes,
++)
+ from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
+ from synapse.api.room_versions import RoomVersions
+ from synapse.media.filepath import MediaFilePaths
+@@ -5467,6 +5473,53 @@ class UserRedactionTestCase(unittest.HomeserverTestCase):
+ # we originally sent 5 messages so 5 should be redacted
+ self.assertEqual(len(original_message_ids), 0)
+
++ def test_redact_redacts_encrypted_messages(self) -> None:
++ """
++ Test that user's encrypted messages are redacted
++ """
++ encrypted_room = self.helper.create_room_as(
++ self.admin, tok=self.admin_tok, room_version="7"
++ )
++ self.helper.send_state(
++ encrypted_room,
++ EventTypes.RoomEncryption,
++ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
++ tok=self.admin_tok,
++ )
++ # join room send some messages
++ originals = []
++ join = self.helper.join(encrypted_room, self.bad_user, tok=self.bad_user_tok)
++ originals.append(join["event_id"])
++ for _ in range(15):
++ res = self.helper.send_event(
++ encrypted_room, "m.room.encrypted", {}, tok=self.bad_user_tok
++ )
++ originals.append(res["event_id"])
++
++ # redact user's events
++ channel = self.make_request(
++ "POST",
++ f"/_synapse/admin/v1/user/{self.bad_user}/redact",
++ content={"rooms": []},
++ access_token=self.admin_tok,
++ )
++ self.assertEqual(channel.code, 200)
++
++ matched = []
++ filter = json.dumps({"types": [EventTypes.Redaction]})
++ channel = self.make_request(
++ "GET",
++ f"rooms/{encrypted_room}/messages?filter={filter}&limit=50",
++ access_token=self.admin_tok,
++ )
++ self.assertEqual(channel.code, 200)
++
++ for event in channel.json_body["chunk"]:
++ for event_id in originals:
++ if event["type"] == "m.room.redaction" and event["redacts"] == event_id:
++ matched.append(event_id)
++ self.assertEqual(len(matched), len(originals))
++
+
+ class UserRedactionBackgroundTaskTestCase(BaseMultiWorkerStreamTestCase):
+ servlets = [
+--
+2.49.0
+
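For reference, the endpoint the test drives can be called directly. A hedged sketch, with `HOMESERVER`, the admin token and the user ID as placeholders (`requests` assumed available):

```python
import requests

HOMESERVER = "https://matrix.example.org"
ADMIN_TOKEN = "syt_..."  # an admin user's access token

resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/user/@bad_user:example.org/redact",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    # An empty "rooms" list means all rooms the user is in; with this patch,
    # m.room.encrypted events are redacted alongside plain messages.
    json={"rooms": []},
)
resp.raise_for_status()
print(resp.json())  # includes an ID for polling the redaction's status
```
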
diff --git a/packages/overlays/matrix-synapse/patches/0056-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch b/packages/overlays/matrix-synapse/patches/0056-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch
new file mode 100644
index 0000000..55c5fbd
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0056-Bump-actions-setup-python-from-5.5.0-to-5.6.0-18398.patch
@@ -0,0 +1,280 @@
+From 078cefd014806a67249ddb59b5976c7e93227f37 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Mon, 19 May 2025 09:51:08 +0100
+Subject: [PATCH 56/74] Bump actions/setup-python from 5.5.0 to 5.6.0 (#18398)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Bumps [actions/setup-python](https://github.com/actions/setup-python)
+from 5.5.0 to 5.6.0.
+<details>
+<summary>Release notes</summary>
+<p><em>Sourced from <a
+href="https://github.com/actions/setup-python/releases">actions/setup-python's
+releases</a>.</em></p>
+<blockquote>
+<h2>v5.6.0</h2>
+<h2>What's Changed</h2>
+<ul>
+<li>Workflow updates related to Ubuntu 20.04 by <a
+href="https://github.com/aparnajyothi-y"><code>@aparnajyothi-y</code></a>
+in <a
+href="https://redirect.github.com/actions/setup-python/pull/1065">actions/setup-python#1065</a></li>
+<li>Fix for Candidate Not Iterable Error by <a
+href="https://github.com/aparnajyothi-y"><code>@aparnajyothi-y</code></a>
+in <a
+href="https://redirect.github.com/actions/setup-python/pull/1082">actions/setup-python#1082</a></li>
+<li>Upgrade semver and <code>@types/semver</code> by <a
+href="https://github.com/dependabot"><code>@dependabot</code></a> in <a
+href="https://redirect.github.com/actions/setup-python/pull/1091">actions/setup-python#1091</a></li>
+<li>Upgrade prettier from 2.8.8 to 3.5.3 by <a
+href="https://github.com/dependabot"><code>@dependabot</code></a> in <a
+href="https://redirect.github.com/actions/setup-python/pull/1046">actions/setup-python#1046</a></li>
+<li>Upgrade ts-jest from 29.1.2 to 29.3.2 by <a
+href="https://github.com/dependabot"><code>@dependabot</code></a> in <a
+href="https://redirect.github.com/actions/setup-python/pull/1081">actions/setup-python#1081</a></li>
+</ul>
+<p><strong>Full Changelog</strong>: <a
+href="https://github.com/actions/setup-python/compare/v5...v5.6.0">https://github.com/actions/setup-python/compare/v5...v5.6.0</a></p>
+</blockquote>
+</details>
+<details>
+<summary>Commits</summary>
+<ul>
+<li><a
+href="https://github.com/actions/setup-python/commit/a26af69be951a213d495a4c3e4e4022e16d87065"><code>a26af69</code></a>
+Bump ts-jest from 29.1.2 to 29.3.2 (<a
+href="https://redirect.github.com/actions/setup-python/issues/1081">#1081</a>)</li>
+<li><a
+href="https://github.com/actions/setup-python/commit/30eafe95483bd95135b7eda0c66a0369af9afdf1"><code>30eafe9</code></a>
+Bump prettier from 2.8.8 to 3.5.3 (<a
+href="https://redirect.github.com/actions/setup-python/issues/1046">#1046</a>)</li>
+<li><a
+href="https://github.com/actions/setup-python/commit/5d95bc16d4bc83bb56202da9630d84c6f8a2d8f5"><code>5d95bc1</code></a>
+Bump semver and <code>@types/semver</code> (<a
+href="https://redirect.github.com/actions/setup-python/issues/1091">#1091</a>)</li>
+<li><a
+href="https://github.com/actions/setup-python/commit/6ed2c67c8abe7646815dbd50364eea862d396fd9"><code>6ed2c67</code></a>
+Fix for Candidate Not Iterable Error (<a
+href="https://redirect.github.com/actions/setup-python/issues/1082">#1082</a>)</li>
+<li><a
+href="https://github.com/actions/setup-python/commit/e348410e00f449ece8581cb8e88be8f0e7712da6"><code>e348410</code></a>
+Remove Ubuntu 20.04 from workflows due to deprecation from 2025-04-15
+(<a
+href="https://redirect.github.com/actions/setup-python/issues/1065">#1065</a>)</li>
+<li>See full diff in <a
+href="https://github.com/actions/setup-python/compare/8d9ed9ac5c53483de85588cdf95a591a75ab9f55...a26af69be951a213d495a4c3e4e4022e16d87065">compare
+view</a></li>
+</ul>
+</details>
+
+Signed-off-by: dependabot[bot] <support@github.com>
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ .github/workflows/docs-pr.yaml | 2 +-
+ .github/workflows/docs.yaml | 2 +-
+ .github/workflows/latest_deps.yml | 2 +-
+ .github/workflows/poetry_lockfile.yaml | 2 +-
+ .github/workflows/release-artifacts.yml | 8 ++++----
+ .github/workflows/tests.yml | 12 ++++++------
+ 6 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
+index 616ef0f9cf..1f4f79598a 100644
+--- a/.github/workflows/docs-pr.yaml
++++ b/.github/workflows/docs-pr.yaml
+@@ -24,7 +24,7 @@ jobs:
+ mdbook-version: '0.4.17'
+
+ - name: Setup python
+- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+
+diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
+index 05ae608d06..930c71a8b4 100644
+--- a/.github/workflows/docs.yaml
++++ b/.github/workflows/docs.yaml
+@@ -64,7 +64,7 @@ jobs:
+ run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
+
+ - name: Setup python
+- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+
+diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
+index 366bb4cddb..ee0dac3beb 100644
+--- a/.github/workflows/latest_deps.yml
++++ b/.github/workflows/latest_deps.yml
+@@ -86,7 +86,7 @@ jobs:
+ -e POSTGRES_PASSWORD=postgres \
+ -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
+ postgres:${{ matrix.postgres-version }}
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+ - run: pip install .[all,test]
+diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml
+index 31b9147e98..1668ad81d2 100644
+--- a/.github/workflows/poetry_lockfile.yaml
++++ b/.github/workflows/poetry_lockfile.yaml
+@@ -17,7 +17,7 @@ jobs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: '3.x'
+ - run: pip install tomli
+diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
+index e03c9d2bd5..572d73e6ad 100644
+--- a/.github/workflows/release-artifacts.yml
++++ b/.github/workflows/release-artifacts.yml
+@@ -28,7 +28,7 @@ jobs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: '3.x'
+ - id: set-distros
+@@ -74,7 +74,7 @@ jobs:
+ ${{ runner.os }}-buildx-
+
+ - name: Set up python
+- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: '3.x'
+
+@@ -132,7 +132,7 @@ jobs:
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ # setup-python@v4 doesn't impose a default python version. Need to use 3.x
+ # here, because `python` on osx points to Python 2.7.
+@@ -177,7 +177,7 @@ jobs:
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: '3.10'
+
+diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
+index a7e35a0ece..848240f68e 100644
+--- a/.github/workflows/tests.yml
++++ b/.github/workflows/tests.yml
+@@ -102,7 +102,7 @@ jobs:
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+ - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
+@@ -112,7 +112,7 @@ jobs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+ - run: .ci/scripts/check_lockfile.py
+@@ -192,7 +192,7 @@ jobs:
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ fetch-depth: 0
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+ - run: "pip install 'towncrier>=18.6.0rc1'"
+@@ -279,7 +279,7 @@ jobs:
+ if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+ - run: "pip install rstcheck"
+@@ -327,7 +327,7 @@ jobs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: "3.x"
+ - id: get-matrix
+@@ -414,7 +414,7 @@ jobs:
+ sudo apt-get -qq install build-essential libffi-dev python3-dev \
+ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
+
+- - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
++ - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: '3.9'
+
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0057-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch b/packages/overlays/matrix-synapse/patches/0057-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch
new file mode 100644
index 0000000..ffb8bd3
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0057-Bump-docker-build-push-action-from-6.15.0-to-6.16.0-.patch
@@ -0,0 +1,150 @@
+From 7d4c3b64e34571f3ace10fa7e33d07853bf16d67 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Mon, 19 May 2025 09:51:52 +0100
+Subject: [PATCH 57/74] Bump docker/build-push-action from 6.15.0 to 6.16.0
+ (#18397)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Bumps
+[docker/build-push-action](https://github.com/docker/build-push-action)
+from 6.15.0 to 6.16.0.
+<details>
+<summary>Release notes</summary>
+<p><em>Sourced from <a
+href="https://github.com/docker/build-push-action/releases">docker/build-push-action's
+releases</a>.</em></p>
+<blockquote>
+<h2>v6.16.0</h2>
+<ul>
+<li>Handle no default attestations env var by <a
+href="https://github.com/crazy-max"><code>@crazy-max</code></a> in <a
+href="https://redirect.github.com/docker/build-push-action/pull/1343">docker/build-push-action#1343</a></li>
+<li>Only print secret keys in build summary output by <a
+href="https://github.com/crazy-max"><code>@crazy-max</code></a> in <a
+href="https://redirect.github.com/docker/build-push-action/pull/1353">docker/build-push-action#1353</a></li>
+<li>Bump <code>@docker/actions-toolkit</code> from 0.56.0 to 0.59.0 in
+<a
+href="https://redirect.github.com/docker/build-push-action/pull/1352">docker/build-push-action#1352</a></li>
+</ul>
+<p><strong>Full Changelog</strong>: <a
+href="https://github.com/docker/build-push-action/compare/v6.15.0...v6.16.0">https://github.com/docker/build-push-action/compare/v6.15.0...v6.16.0</a></p>
+</blockquote>
+</details>
+<details>
+<summary>Commits</summary>
+<ul>
+<li><a
+href="https://github.com/docker/build-push-action/commit/14487ce63c7a62a4a324b0bfb37086795e31c6c1"><code>14487ce</code></a>
+Merge pull request <a
+href="https://redirect.github.com/docker/build-push-action/issues/1343">#1343</a>
+from crazy-max/fix-no-default-attest</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/0ec91264d895acf7dfe05d54d8a3cc28f95b6346"><code>0ec9126</code></a>
+Merge pull request <a
+href="https://redirect.github.com/docker/build-push-action/issues/1366">#1366</a>
+from crazy-max/pr-assign-author</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/b749522b90af1b517f52d8c1e67b2a965cea5eae"><code>b749522</code></a>
+pr-assign-author workflow</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/c566248492c912e39910ac79e2f05a82260233a8"><code>c566248</code></a>
+Merge pull request <a
+href="https://redirect.github.com/docker/build-push-action/issues/1363">#1363</a>
+from crazy-max/fix-codecov</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/13275dd76e44afdffdd61da8b8ae8e26ee11671f"><code>13275dd</code></a>
+ci: fix missing source for codecov</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/67dc78bbaf388b3265f7e1c880e681f4b90d5f48"><code>67dc78b</code></a>
+Merge pull request <a
+href="https://redirect.github.com/docker/build-push-action/issues/1361">#1361</a>
+from mschoettle/patch-1</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/0760504437ba8d0d98e7d5b625560bdede11b3b5"><code>0760504</code></a>
+docs: add validating build configuration example</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/1c198f4467ce458288d816cabd773cd574f16977"><code>1c198f4</code></a>
+chore: update generated content</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/288d9e2e4a70c24711ba959b94c2209b9205347e"><code>288d9e2</code></a>
+handle no default attestations env var</li>
+<li><a
+href="https://github.com/docker/build-push-action/commit/88844b95d8cbbb41035fa9c94e5967a33b92db78"><code>88844b9</code></a>
+Merge pull request <a
+href="https://redirect.github.com/docker/build-push-action/issues/1353">#1353</a>
+from crazy-max/summary-secret-keys</li>
+<li>Additional commits viewable in <a
+href="https://github.com/docker/build-push-action/compare/471d1dc4e07e5cdedd4c2171150001c434f0b7a4...14487ce63c7a62a4a324b0bfb37086795e31c6c1">compare
+view</a></li>
+</ul>
+</details>
+
+Signed-off-by: dependabot[bot] <support@github.com>
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ .github/workflows/docker.yml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
+index c617753c7a..009089db3a 100644
+--- a/.github/workflows/docker.yml
++++ b/.github/workflows/docker.yml
+@@ -72,7 +72,7 @@ jobs:
+
+ - name: Build and push all platforms
+ id: build-and-push
+- uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
++ uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
+ with:
+ push: true
+ labels: |
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0058-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch b/packages/overlays/matrix-synapse/patches/0058-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch
new file mode 100644
index 0000000..aa7b806
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0058-Check-for-CREATE-DROP-INDEX-in-schema-deltas-18440.patch
@@ -0,0 +1,235 @@
+From fa4a00a2da753a52dde582c0f56e3ea6567bd53b Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Mon, 19 May 2025 11:52:05 +0100
+Subject: [PATCH 58/74] Check for `CREATE/DROP INDEX` in schema deltas (#18440)
+
+These should be done as background updates instead.
+---
+ changelog.d/18440.misc | 1 +
+ scripts-dev/check_schema_delta.py | 127 +++++++++++++++++++++++-------
+ 2 files changed, 99 insertions(+), 29 deletions(-)
+ create mode 100644 changelog.d/18440.misc
+
+diff --git a/changelog.d/18440.misc b/changelog.d/18440.misc
+new file mode 100644
+index 0000000000..6aaa6dde5c
+--- /dev/null
++++ b/changelog.d/18440.misc
+@@ -0,0 +1 @@
++Add lint to ensure we don't add a `CREATE/DROP INDEX` in a schema delta.
+diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py
+index 467be96fdf..454784c3ae 100755
+--- a/scripts-dev/check_schema_delta.py
++++ b/scripts-dev/check_schema_delta.py
+@@ -1,6 +1,8 @@
+ #!/usr/bin/env python3
+
+ # Check that no schema deltas have been added to the wrong version.
++#
++# Also checks that schema deltas do not try and create or drop indices.
+
+ import re
+ from typing import Any, Dict, List
+@@ -9,6 +11,13 @@ import click
+ import git
+
+ SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
++INDEX_CREATION_REGEX = re.compile(r"CREATE .*INDEX .*ON ([a-z_]+)", flags=re.IGNORECASE)
++INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_]+)", flags=re.IGNORECASE)
++TABLE_CREATION_REGEX = re.compile(r"CREATE .*TABLE ([a-z_]+)", flags=re.IGNORECASE)
++
++# The base branch we want to check against. We use the main development branch
++# on the assumption that it is what we are developing against.
++DEVELOP_BRANCH = "develop"
+
+
+ @click.command()
+@@ -20,6 +29,9 @@ SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
+ help="Always output ANSI colours",
+ )
+ def main(force_colors: bool) -> None:
++ # Return code. Set to non-zero when we encounter an error
++ return_code = 0
++
+ click.secho(
+ "+++ Checking schema deltas are in the right folder",
+ fg="green",
+@@ -30,17 +42,17 @@ def main(force_colors: bool) -> None:
+ click.secho("Updating repo...")
+
+ repo = git.Repo()
+- repo.remote().fetch()
++ repo.remote().fetch(refspec=DEVELOP_BRANCH)
+
+ click.secho("Getting current schema version...")
+
+- r = repo.git.show("origin/develop:synapse/storage/schema/__init__.py")
++ r = repo.git.show(f"origin/{DEVELOP_BRANCH}:synapse/storage/schema/__init__.py")
+
+ locals: Dict[str, Any] = {}
+ exec(r, locals)
+ current_schema_version = locals["SCHEMA_VERSION"]
+
+- diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
++ diffs: List[git.Diff] = repo.remote().refs[DEVELOP_BRANCH].commit.diff(None)
+
+ # Get the schema version of the local file to check against current schema on develop
+ with open("synapse/storage/schema/__init__.py") as file:
+@@ -53,7 +65,7 @@ def main(force_colors: bool) -> None:
+ # local schema version must be +/-1 the current schema version on develop
+ if abs(local_schema_version - current_schema_version) != 1:
+ click.secho(
+- "The proposed schema version has diverged more than one version from develop, please fix!",
++ f"The proposed schema version has diverged more than one version from {DEVELOP_BRANCH}, please fix!",
+ fg="red",
+ bold=True,
+ color=force_colors,
+@@ -67,21 +79,28 @@ def main(force_colors: bool) -> None:
+ click.secho(f"Current schema version: {current_schema_version}")
+
+ seen_deltas = False
+- bad_files = []
++ bad_delta_files = []
++ changed_delta_files = []
+ for diff in diffs:
+- if not diff.new_file or diff.b_path is None:
++ if diff.b_path is None:
++ # We don't lint deleted files.
+ continue
+
+ match = SCHEMA_FILE_REGEX.match(diff.b_path)
+ if not match:
+ continue
+
++ changed_delta_files.append(diff.b_path)
++
++ if not diff.new_file:
++ continue
++
+ seen_deltas = True
+
+ _, delta_version, _ = match.groups()
+
+ if delta_version != str(current_schema_version):
+- bad_files.append(diff.b_path)
++ bad_delta_files.append(diff.b_path)
+
+ if not seen_deltas:
+ click.secho(
+@@ -92,41 +111,91 @@ def main(force_colors: bool) -> None:
+ )
+ return
+
+- if not bad_files:
++ if bad_delta_files:
++ bad_delta_files.sort()
++
+ click.secho(
+- f"All deltas are in the correct folder: {current_schema_version}!",
+- fg="green",
++ "Found deltas in the wrong folder!",
++ fg="red",
+ bold=True,
+ color=force_colors,
+ )
+- return
+
+- bad_files.sort()
+-
+- click.secho(
+- "Found deltas in the wrong folder!",
+- fg="red",
+- bold=True,
+- color=force_colors,
+- )
++ for f in bad_delta_files:
++ click.secho(
++ f"\t{f}",
++ fg="red",
++ bold=True,
++ color=force_colors,
++ )
+
+- for f in bad_files:
++ click.secho()
+ click.secho(
+- f"\t{f}",
++ f"Please move these files to delta/{current_schema_version}/",
+ fg="red",
+ bold=True,
+ color=force_colors,
+ )
+
+- click.secho()
+- click.secho(
+- f"Please move these files to delta/{current_schema_version}/",
+- fg="red",
+- bold=True,
+- color=force_colors,
+- )
++ else:
++ click.secho(
++ f"All deltas are in the correct folder: {current_schema_version}!",
++ fg="green",
++ bold=True,
++ color=force_colors,
++ )
+
+- click.get_current_context().exit(1)
++ # Make sure we process them in order. This sort works because deltas are numbered
++ # and delta files are also numbered in order.
++ changed_delta_files.sort()
++
++ # Now check that we're not trying to create or drop indices. If we want to
++ # do that they should be in background updates. The exception is when we
++ # create indices on tables we've just created.
++ created_tables = set()
++ for delta_file in changed_delta_files:
++ with open(delta_file) as fd:
++ delta_lines = fd.readlines()
++
++ for line in delta_lines:
++ # Strip SQL comments
++ line = line.split("--", maxsplit=1)[0]
++
++ # Check and track any tables we create
++ match = TABLE_CREATION_REGEX.search(line)
++ if match:
++ table_name = match.group(1)
++ created_tables.add(table_name)
++
++ # Check for dropping indices, these are always banned
++ match = INDEX_DELETION_REGEX.search(line)
++ if match:
++ clause = match.group()
++
++ click.secho(
++ f"Found delta with index deletion: '{clause}' in {delta_file}\nThese should be in background updates.",
++ fg="red",
++ bold=True,
++ color=force_colors,
++ )
++ return_code = 1
++
++ # Check for index creation, which is only allowed for tables we've
++ # created.
++ match = INDEX_CREATION_REGEX.search(line)
++ if match:
++ clause = match.group()
++ table_name = match.group(1)
++ if table_name not in created_tables:
++ click.secho(
++ f"Found delta with index creation: '{clause}' in {delta_file}\nThese should be in background updates.",
++ fg="red",
++ bold=True,
++ color=force_colors,
++ )
++ return_code = 1
++
++ click.get_current_context().exit(return_code)
+
+
+ if __name__ == "__main__":
+--
+2.49.0
+
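The lint's regexes can be exercised standalone. The patterns below are copied from the patch; the sample SQL lines are made up:

```python
import re

INDEX_CREATION_REGEX = re.compile(r"CREATE .*INDEX .*ON ([a-z_]+)", flags=re.IGNORECASE)
INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_]+)", flags=re.IGNORECASE)

lines = [
    "CREATE INDEX IF NOT EXISTS foo_idx ON foo_table(bar);",
    "DROP INDEX IF EXISTS foo_idx;",
    "-- CREATE INDEX inside a SQL comment is ignored",
]
for line in lines:
    line = line.split("--", maxsplit=1)[0]  # strip SQL comments, as the lint does
    if m := INDEX_CREATION_REGEX.search(line):
        print("index creation on table:", m.group(1))
    if m := INDEX_DELETION_REGEX.search(line):
        print("index deletion clause:", m.group(0))
```
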
diff --git a/packages/overlays/matrix-synapse/patches/0059-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch b/packages/overlays/matrix-synapse/patches/0059-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch
new file mode 100644
index 0000000..1605034
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0059-Bump-pyo3-log-from-0.12.3-to-0.12.4-18453.patch
@@ -0,0 +1,29 @@
+From b3b24c69fcbdb67de04b0388aa104d43780ba88f Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Mon, 19 May 2025 13:04:15 +0100
+Subject: [PATCH 59/74] Bump pyo3-log from 0.12.3 to 0.12.4 (#18453)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ Cargo.lock | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Cargo.lock b/Cargo.lock
+index 27a2e26be5..13156e67b5 100644
+--- a/Cargo.lock
++++ b/Cargo.lock
+@@ -316,9 +316,9 @@ dependencies = [
+
+ [[package]]
+ name = "pyo3-log"
+-version = "0.12.3"
++version = "0.12.4"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+-checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
++checksum = "45192e5e4a4d2505587e27806c7b710c231c40c56f3bfc19535d0bb25df52264"
+ dependencies = [
+ "arc-swap",
+ "log",
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0060-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch b/packages/overlays/matrix-synapse/patches/0060-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch
new file mode 100644
index 0000000..a24ca1f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0060-Bump-authlib-from-1.5.1-to-1.5.2-18452.patch
@@ -0,0 +1,249 @@
+From cd1a3ac584d9a353e24e42354ae71028654f7f61 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Mon, 19 May 2025 13:06:11 +0100
+Subject: [PATCH 60/74] Bump authlib from 1.5.1 to 1.5.2 (#18452)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ poetry.lock | 55 +++++++++++++++++++++++++++--------------------------
+ 1 file changed, 28 insertions(+), 27 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 7190d0f788..cf3ca18611 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -1,4 +1,4 @@
+-# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
++# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+
+ [[package]]
+ name = "annotated-types"
+@@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a
+
+ [[package]]
+ name = "authlib"
+-version = "1.5.1"
++version = "1.5.2"
+ description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
+ optional = true
+ python-versions = ">=3.9"
+ groups = ["main"]
+-markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
+ files = [
+- {file = "authlib-1.5.1-py2.py3-none-any.whl", hash = "sha256:8408861cbd9b4ea2ff759b00b6f02fd7d81ac5a56d0b2b22c08606c6049aae11"},
+- {file = "authlib-1.5.1.tar.gz", hash = "sha256:5cbc85ecb0667312c1cdc2f9095680bb735883b123fb509fde1e65b1c5df972e"},
++ {file = "authlib-1.5.2-py2.py3-none-any.whl", hash = "sha256:8804dd4402ac5e4a0435ac49e0b6e19e395357cfa632a3f624dcb4f6df13b4b1"},
++ {file = "authlib-1.5.2.tar.gz", hash = "sha256:fe85ec7e50c5f86f1e2603518bb3b4f632985eb4a355e52256530790e326c512"},
+ ]
+
+ [package.dependencies]
+@@ -451,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
+ optional = true
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+ groups = ["main"]
+-markers = "extra == \"saml2\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"saml2\""
+ files = [
+ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
+ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
+@@ -494,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"saml2\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"saml2\""
+ files = [
+ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
+ {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
+@@ -544,7 +544,7 @@ description = "Python wrapper for hiredis"
+ optional = true
+ python-versions = ">=3.8"
+ groups = ["main"]
+-markers = "extra == \"redis\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"redis\""
+ files = [
+ {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
+ {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},
+@@ -890,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"opentracing\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"opentracing\""
+ files = [
+ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
+ ]
+@@ -1028,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
+ files = [
+ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
+ {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
+@@ -1044,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
+ optional = true
+ python-versions = ">=3.6"
+ groups = ["main"]
+-markers = "extra == \"url-preview\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"url-preview\""
+ files = [
+ {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
+ {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
+@@ -1330,7 +1330,7 @@ description = "An LDAP3 auth provider for Synapse"
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
+ files = [
+ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
+ {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
+@@ -1551,7 +1551,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"opentracing\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"opentracing\""
+ files = [
+ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
+ ]
+@@ -1720,7 +1720,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
+ optional = true
+ python-versions = ">=3.8"
+ groups = ["main"]
+-markers = "extra == \"postgres\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"postgres\""
+ files = [
+ {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
+ {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
+@@ -1728,6 +1728,7 @@ files = [
+ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
+ {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
+ {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
++ {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
+ {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
+ {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
+ {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
+@@ -1740,7 +1741,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
++markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
+ files = [
+ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
+ ]
+@@ -1756,7 +1757,7 @@ description = "A Simple library to enable psycopg2 compatability"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
++markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
+ files = [
+ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
+ ]
+@@ -1979,7 +1980,7 @@ description = "Python extension wrapping the ICU C++ API"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"user-search\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"user-search\""
+ files = [
+ {file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
+ ]
+@@ -2028,7 +2029,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
+ optional = true
+ python-versions = ">=3.6"
+ groups = ["main"]
+-markers = "extra == \"cache-memory\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"cache-memory\""
+ files = [
+ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
+ {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
+@@ -2088,7 +2089,7 @@ description = "Python implementation of SAML Version 2 Standard"
+ optional = true
+ python-versions = ">=3.9,<4.0"
+ groups = ["main"]
+-markers = "extra == \"saml2\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"saml2\""
+ files = [
+ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
+ {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
+@@ -2113,7 +2114,7 @@ description = "Extensions to the standard Python datetime module"
+ optional = true
+ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+ groups = ["main"]
+-markers = "extra == \"saml2\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"saml2\""
+ files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+@@ -2141,7 +2142,7 @@ description = "World timezone definitions, modern and historical"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"saml2\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"saml2\""
+ files = [
+ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
+ {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
+@@ -2505,7 +2506,7 @@ description = "Python client for Sentry (https://sentry.io)"
+ optional = true
+ python-versions = ">=3.6"
+ groups = ["main"]
+-markers = "extra == \"sentry\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"sentry\""
+ files = [
+ {file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
+ {file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},
+@@ -2689,7 +2690,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"opentracing\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"opentracing\""
+ files = [
+ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
+ {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
+@@ -2705,7 +2706,7 @@ description = "Python bindings for the Apache Thrift RPC system"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"opentracing\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"opentracing\""
+ files = [
+ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
+ ]
+@@ -2767,7 +2768,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
+ optional = true
+ python-versions = ">=3.8"
+ groups = ["main"]
+-markers = "extra == \"opentracing\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"opentracing\""
+ files = [
+ {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
+ {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
+@@ -2901,7 +2902,7 @@ description = "non-blocking redis client for python"
+ optional = true
+ python-versions = "*"
+ groups = ["main"]
+-markers = "extra == \"redis\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"redis\""
+ files = [
+ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
+ {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
+@@ -3244,7 +3245,7 @@ description = "An XML Schema validator and decoder"
+ optional = true
+ python-versions = ">=3.7"
+ groups = ["main"]
+-markers = "extra == \"saml2\" or extra == \"all\""
++markers = "extra == \"all\" or extra == \"saml2\""
+ files = [
+ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
+ {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
+--
+2.49.0
+
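The long run of poetry.lock hunks above only reorders the extras inside each environment marker; `extra == "all" or extra == "matrix-synapse-ldap3"` selects exactly the same installs as the original ordering. A quick way to convince yourself, sketched here with the third-party `packaging` library (an assumption, not part of the patch), is to evaluate both forms against the same environment:

```python
# Minimal equivalence check for the reordered lockfile markers above.
# Uses the `packaging` library; nothing here is part of the patch itself.
from packaging.markers import Marker

old = Marker('extra == "matrix-synapse-ldap3" or extra == "all"')
new = Marker('extra == "all" or extra == "matrix-synapse-ldap3"')

for extra in ("all", "matrix-synapse-ldap3", "unrelated-extra"):
    env = {"extra": extra}
    assert old.evaluate(env) == new.evaluate(env)
print("both marker orderings select the same extras")
```

Since `or` is commutative, the churn is purely cosmetic: a newer Poetry canonicalises the order in which extras appear in the marker strings.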
diff --git a/packages/overlays/matrix-synapse/patches/0061-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch b/packages/overlays/matrix-synapse/patches/0061-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch
new file mode 100644
index 0000000..782024f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0061-Bump-pyopenssl-from-25.0.0-to-25.1.0-18450.patch
@@ -0,0 +1,40 @@
+From afeb0e01c552216d0d987cd504aab440b07bdb10 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Mon, 19 May 2025 13:06:45 +0100
+Subject: [PATCH 61/74] Bump pyopenssl from 25.0.0 to 25.1.0 (#18450)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ poetry.lock | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index cf3ca18611..54ddad3bdd 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -2064,18 +2064,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
+
+ [[package]]
+ name = "pyopenssl"
+-version = "25.0.0"
++version = "25.1.0"
+ description = "Python wrapper module around the OpenSSL library"
+ optional = false
+ python-versions = ">=3.7"
+ groups = ["main"]
+ files = [
+- {file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"},
+- {file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"},
++ {file = "pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab"},
++ {file = "pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b"},
+ ]
+
+ [package.dependencies]
+-cryptography = ">=41.0.5,<45"
++cryptography = ">=41.0.5,<46"
+ typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
+
+ [package.extras]
+--
+2.49.0
+
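Besides the wheel hashes, the functional change in this bump is the relaxed `cryptography` bound (`<45` becomes `<46`). A small sketch, again using the `packaging` library (an assumption, not part of the patch), shows what the new specifier admits:

```python
# Check which cryptography releases the old and new pyopenssl pins admit.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=41.0.5,<45")
new_pin = SpecifierSet(">=41.0.5,<46")

candidate = Version("45.0.0")
print(candidate in old_pin)  # False: rejected by the old upper bound
print(candidate in new_pin)  # True: admitted after the bump
```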
diff --git a/packages/overlays/matrix-synapse/patches/0062-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch b/packages/overlays/matrix-synapse/patches/0062-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch
new file mode 100644
index 0000000..535f783
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0062-Bump-docker-build-push-action-from-6.16.0-to-6.17.0-.patch
@@ -0,0 +1,27 @@
+From 17e6b32966670550c5fb4f232b390dd25ec77759 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Mon, 19 May 2025 13:07:24 +0100
+Subject: [PATCH 62/74] Bump docker/build-push-action from 6.16.0 to 6.17.0
+ (#18449)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ .github/workflows/docker.yml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
+index 009089db3a..feeadf170d 100644
+--- a/.github/workflows/docker.yml
++++ b/.github/workflows/docker.yml
+@@ -72,7 +72,7 @@ jobs:
+
+ - name: Build and push all platforms
+ id: build-and-push
+- uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
++ uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
+ with:
+ push: true
+ labels: |
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch b/packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch
new file mode 100644
index 0000000..7f666b4
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0063-Fix-up-the-topological-ordering-for-events-above-MAX.patch
@@ -0,0 +1,342 @@
+From 67920c0aca6bb23f76390fa4827ce2e6e1889547 Mon Sep 17 00:00:00 2001
+From: Erik Johnston <erikj@element.io>
+Date: Mon, 19 May 2025 13:36:30 +0100
+Subject: [PATCH 63/74] Fix up the topological ordering for events above
+ `MAX_DEPTH` (#18447)
+
+Synapse previously did not correctly cap the max depth of an event to
+the max canonical json int. This can cause ordering issues for any
+events that were sent locally at the time.
+
+This background update goes and correctly caps the topological ordering
+to the new `MAX_DEPTH`.
+
+c.f. GHSA-v56r-hwv5-mxg6
+
+---------
+
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18447.bugfix | 1 +
+ .../databases/main/events_bg_updates.py | 82 ++++++++-
+ .../main/delta/92/05_fixup_max_depth_cap.sql | 17 ++
+ synapse/types/storage/__init__.py | 2 +
+ tests/storage/test_events_bg_updates.py | 157 ++++++++++++++++++
+ 5 files changed, 258 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18447.bugfix
+ create mode 100644 synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql
+ create mode 100644 tests/storage/test_events_bg_updates.py
+
+diff --git a/changelog.d/18447.bugfix b/changelog.d/18447.bugfix
+new file mode 100644
+index 0000000000..578be1ffe9
+--- /dev/null
++++ b/changelog.d/18447.bugfix
+@@ -0,0 +1 @@
++Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6).
+diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
+index 4b0bdd79c6..5c83a9f779 100644
+--- a/synapse/storage/databases/main/events_bg_updates.py
++++ b/synapse/storage/databases/main/events_bg_updates.py
+@@ -24,7 +24,12 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
+
+ import attr
+
+-from synapse.api.constants import EventContentFields, Membership, RelationTypes
++from synapse.api.constants import (
++ MAX_DEPTH,
++ EventContentFields,
++ Membership,
++ RelationTypes,
++)
+ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+ from synapse.events import EventBase, make_event_from_dict
+ from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
+@@ -311,6 +316,10 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
+ self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update,
+ )
+
++ self.db_pool.updates.register_background_update_handler(
++ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, self.fixup_max_depth_cap_bg_update
++ )
++
+ # We want this to run on the main database at startup before we start processing
+ # events.
+ #
+@@ -2547,6 +2556,77 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
+
+ return num_rows
+
++ async def fixup_max_depth_cap_bg_update(
++ self, progress: JsonDict, batch_size: int
++ ) -> int:
++ """Fixes the topological ordering for events that have a depth greater
++ than MAX_DEPTH. This should fix /messages ordering oddities."""
++
++ room_id_bound = progress.get("room_id", "")
++
++ def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> Tuple[bool, int]:
++ txn.execute(
++ """
++ SELECT room_id, room_version FROM rooms
++ WHERE room_id > ?
++ ORDER BY room_id
++ LIMIT ?
++ """,
++ (room_id_bound, batch_size),
++ )
++
++ # Find the next room ID to process, with a relevant room version.
++ room_ids: List[str] = []
++ max_room_id: Optional[str] = None
++ for room_id, room_version_str in txn:
++ max_room_id = room_id
++
++ # We only want to process rooms with a known room version that
++ # has strict canonical json validation enabled.
++ room_version = KNOWN_ROOM_VERSIONS.get(room_version_str)
++ if room_version and room_version.strict_canonicaljson:
++ room_ids.append(room_id)
++
++ if max_room_id is None:
++ # The query did not return any rooms, so we are done.
++ return True, 0
++
++ # Update the progress to the last room ID we pulled from the DB,
++ # this ensures we always make progress.
++ self.db_pool.updates._background_update_progress_txn(
++ txn,
++ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP,
++ progress={"room_id": max_room_id},
++ )
++
++ if not room_ids:
++ # There were no rooms in this batch that required the fix.
++ return False, 0
++
++ clause, list_args = make_in_list_sql_clause(
++ self.database_engine, "room_id", room_ids
++ )
++ sql = f"""
++ UPDATE events SET topological_ordering = ?
++ WHERE topological_ordering > ? AND {clause}
++ """
++ args = [MAX_DEPTH, MAX_DEPTH]
++ args.extend(list_args)
++ txn.execute(sql, args)
++
++ return False, len(room_ids)
++
++ done, num_rooms = await self.db_pool.runInteraction(
++ "redo_max_depth_bg_update", redo_max_depth_bg_update_txn
++ )
++
++ if done:
++ await self.db_pool.updates._end_background_update(
++ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP
++ )
++
++ return num_rooms
++
+
+ def _resolve_stale_data_in_sliding_sync_tables(
+ txn: LoggingTransaction,
+diff --git a/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql b/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql
+new file mode 100644
+index 0000000000..c1ebf8b58b
+--- /dev/null
++++ b/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql
+@@ -0,0 +1,17 @@
++--
++-- This file is licensed under the Affero General Public License (AGPL) version 3.
++--
++-- Copyright (C) 2025 New Vector, Ltd
++--
++-- This program is free software: you can redistribute it and/or modify
++-- it under the terms of the GNU Affero General Public License as
++-- published by the Free Software Foundation, either version 3 of the
++-- License, or (at your option) any later version.
++--
++-- See the GNU Affero General Public License for more details:
++-- <https://www.gnu.org/licenses/agpl-3.0.html>.
++
++-- Background update that fixes any events with a topological ordering above the
++-- MAX_DEPTH value.
++INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
++ (9205, 'fixup_max_depth_cap', '{}');
+diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py
+index e03ff7ffc8..378a15e038 100644
+--- a/synapse/types/storage/__init__.py
++++ b/synapse/types/storage/__init__.py
+@@ -52,3 +52,5 @@ class _BackgroundUpdates:
+ MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = (
+ "mark_unreferenced_state_groups_for_deletion_bg_update"
+ )
++
++ FIXUP_MAX_DEPTH_CAP = "fixup_max_depth_cap"
+diff --git a/tests/storage/test_events_bg_updates.py b/tests/storage/test_events_bg_updates.py
+new file mode 100644
+index 0000000000..ecdf413e3b
+--- /dev/null
++++ b/tests/storage/test_events_bg_updates.py
+@@ -0,0 +1,157 @@
++#
++# This file is licensed under the Affero General Public License (AGPL) version 3.
++#
++# Copyright (C) 2025 New Vector, Ltd
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU Affero General Public License as
++# published by the Free Software Foundation, either version 3 of the
++# License, or (at your option) any later version.
++#
++# See the GNU Affero General Public License for more details:
++# <https://www.gnu.org/licenses/agpl-3.0.html>.
++#
++#
++
++from typing import Dict
++
++from twisted.test.proto_helpers import MemoryReactor
++
++from synapse.api.constants import MAX_DEPTH
++from synapse.api.room_versions import RoomVersion, RoomVersions
++from synapse.server import HomeServer
++from synapse.util import Clock
++
++from tests.unittest import HomeserverTestCase
++
++
++class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase):
++ """Test the background update that caps topological_ordering at MAX_DEPTH."""
++
++ def prepare(
++ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
++ ) -> None:
++ self.store = self.hs.get_datastores().main
++ self.db_pool = self.store.db_pool
++
++ self.room_id = "!testroom:example.com"
++
++ # Reinsert the background update as it was already run at the start of
++ # the test.
++ self.get_success(
++ self.db_pool.simple_insert(
++ table="background_updates",
++ values={
++ "update_name": "fixup_max_depth_cap",
++ "progress_json": "{}",
++ },
++ )
++ )
++
++ def create_room(self, room_version: RoomVersion) -> Dict[str, int]:
++ """Create a room with a known room version and insert events.
++
++ Returns a map of all inserted event IDs to their depth (some of
++ which exceed MAX_DEPTH).
++ """
++
++ # Create a room with a specific room version
++ self.get_success(
++ self.db_pool.simple_insert(
++ table="rooms",
++ values={
++ "room_id": self.room_id,
++ "room_version": room_version.identifier,
++ },
++ )
++ )
++
++ # Insert events with some depths exceeding MAX_DEPTH
++ event_id_to_depth: Dict[str, int] = {}
++ for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5):
++ event_id = f"$event{depth}:example.com"
++ event_id_to_depth[event_id] = depth
++
++ self.get_success(
++ self.db_pool.simple_insert(
++ table="events",
++ values={
++ "event_id": event_id,
++ "room_id": self.room_id,
++ "topological_ordering": depth,
++ "depth": depth,
++ "type": "m.test",
++ "sender": "@user:test",
++ "processed": True,
++ "outlier": False,
++ },
++ )
++ )
++
++ return event_id_to_depth
++
++ def test_fixup_max_depth_cap_bg_update(self) -> None:
++ """Test that the background update correctly caps topological_ordering
++ at MAX_DEPTH."""
++
++ event_id_to_depth = self.create_room(RoomVersions.V6)
++
++ # Run the background update
++ progress = {"room_id": ""}
++ batch_size = 10
++ num_rooms = self.get_success(
++ self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
++ )
++
++ # Verify the number of rooms processed
++ self.assertEqual(num_rooms, 1)
++
++ # Verify that the topological_ordering of events has been capped at
++ # MAX_DEPTH
++ rows = self.get_success(
++ self.db_pool.simple_select_list(
++ table="events",
++ keyvalues={"room_id": self.room_id},
++ retcols=["event_id", "topological_ordering"],
++ )
++ )
++
++ for event_id, topological_ordering in rows:
++ if event_id_to_depth[event_id] >= MAX_DEPTH:
++ # Events with a depth greater than or equal to MAX_DEPTH should
++ # be capped at MAX_DEPTH.
++ self.assertEqual(topological_ordering, MAX_DEPTH)
++ else:
++ # Events with a depth less than MAX_DEPTH should remain
++ # unchanged.
++ self.assertEqual(topological_ordering, event_id_to_depth[event_id])
++
++ def test_fixup_max_depth_cap_bg_update_old_room_version(self) -> None:
++ """Test that the background update does not cap topological_ordering for
++ rooms with old room versions."""
++
++ event_id_to_depth = self.create_room(RoomVersions.V5)
++
++ # Run the background update
++ progress = {"room_id": ""}
++ batch_size = 10
++ num_rooms = self.get_success(
++ self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
++ )
++
++ # Verify the number of rooms processed
++ self.assertEqual(num_rooms, 0)
++
++ # Fetch the topological_ordering of all events in the room to
++ # check that they have not been modified
++ rows = self.get_success(
++ self.db_pool.simple_select_list(
++ table="events",
++ keyvalues={"room_id": self.room_id},
++ retcols=["event_id", "topological_ordering"],
++ )
++ )
++
++ # Assert that the topological_ordering of events has not been changed
++ # from their depth.
++ self.assertDictEqual(event_id_to_depth, dict(rows))
+--
+2.49.0
+
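The invariant this background update restores is worth spelling out. Canonical JSON caps integers at 2**53 - 1, so an event's depth, and therefore its `topological_ordering`, must never exceed that bound. The following is an illustrative sketch only, not Synapse's implementation; the helper name is made up:

```python
# Illustrative sketch only -- not Synapse's actual code. Canonical JSON
# permits integers up to 2**53 - 1, so depth must be capped there.
MAX_DEPTH = 2**53 - 1

def next_depth(prev_event_depths: list[int]) -> int:
    # A new event sits one level deeper than its deepest prev_event,
    # but never beyond the canonical JSON ceiling.
    return min(max(prev_event_depths, default=0) + 1, MAX_DEPTH)

assert next_depth([5]) == 6
assert next_depth([MAX_DEPTH]) == MAX_DEPTH  # the cap kicks in

# Rows written before the cap existed are repaired in place by the
# background update above, batched by room:
#   UPDATE events SET topological_ordering = <MAX_DEPTH>
#   WHERE topological_ordering > <MAX_DEPTH> AND room_id IN (...)
```

Note how the update records its progress as the last room ID pulled in each batch, so it always advances even when no room in the batch needs the fix.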
diff --git a/packages/overlays/matrix-synapse/patches/0064-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch b/packages/overlays/matrix-synapse/patches/0064-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch
new file mode 100644
index 0000000..f216b38
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0064-Allow-only-requiring-a-field-be-present-in-an-SSO-re.patch
@@ -0,0 +1,175 @@
+From 1f4ae2f9eb94808f651b683b4650092015ec39e1 Mon Sep 17 00:00:00 2001
+From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+Date: Mon, 19 May 2025 17:50:02 +0100
+Subject: [PATCH 64/74] Allow only requiring a field be present in an SSO
+ response, rather than specifying a required value (#18454)
+
+---
+ changelog.d/18454.misc | 1 +
+ .../configuration/config_documentation.md | 10 ++-
+ synapse/config/sso.py | 7 +-
+ tests/handlers/test_oidc.py | 77 ++++++++++++++++++-
+ 4 files changed, 86 insertions(+), 9 deletions(-)
+ create mode 100644 changelog.d/18454.misc
+
+diff --git a/changelog.d/18454.misc b/changelog.d/18454.misc
+new file mode 100644
+index 0000000000..892fbd1d94
+--- /dev/null
++++ b/changelog.d/18454.misc
+@@ -0,0 +1 @@
++Allow checking only for the existence of a field in an SSO provider's response, rather than requiring specific value(s) to check against.
+\ No newline at end of file
+diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
+index e688bc5cd8..3927b9ca14 100644
+--- a/docs/usage/configuration/config_documentation.md
++++ b/docs/usage/configuration/config_documentation.md
+@@ -3782,17 +3782,23 @@ match particular values in the OIDC userinfo. The requirements can be listed und
+ ```yaml
+ attribute_requirements:
+ - attribute: family_name
+- value: "Stephensson"
++ one_of: ["Stephensson", "Smith"]
+ - attribute: groups
+ value: "admin"
++ # If `value` or `one_of` are not specified, the attribute only needs
++ # to exist, regardless of value.
++ - attribute: picture
+ ```
++
++`attribute` is a required field, while `value` and `one_of` are optional.
++
+ All of the listed attributes must match for the login to be permitted. Additional attributes can be added to
+ userinfo by expanding the `scopes` section of the OIDC config to retrieve
+ additional information from the OIDC provider.
+
+ If the OIDC claim is a list, then the attribute must match any value in the list.
+ Otherwise, it must exactly match the value of the claim. Using the example
+-above, the `family_name` claim MUST be "Stephensson", but the `groups`
++above, the `family_name` claim MUST be either "Stephensson" or "Smith", but the `groups`
+ claim MUST contain "admin".
+
+ Example configuration:
+diff --git a/synapse/config/sso.py b/synapse/config/sso.py
+index 97b85e47ea..cf27a7ee13 100644
+--- a/synapse/config/sso.py
++++ b/synapse/config/sso.py
+@@ -43,8 +43,7 @@ class SsoAttributeRequirement:
+ """Object describing a single requirement for SSO attributes."""
+
+ attribute: str
+- # If neither value nor one_of is given, the attribute must simply exist. This is
+- # only true for CAS configs which use a different JSON schema than the one below.
++ # If neither `value` nor `one_of` is given, the attribute must simply exist.
+ value: Optional[str] = None
+ one_of: Optional[List[str]] = None
+
+@@ -56,10 +55,6 @@ class SsoAttributeRequirement:
+ "one_of": {"type": "array", "items": {"type": "string"}},
+ },
+ "required": ["attribute"],
+- "oneOf": [
+- {"required": ["value"]},
+- {"required": ["one_of"]},
+- ],
+ }
+
+
+diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
+index e5f31d57ca..ff8e3c5cb6 100644
+--- a/tests/handlers/test_oidc.py
++++ b/tests/handlers/test_oidc.py
+@@ -1453,7 +1453,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
+ }
+ }
+ )
+- def test_attribute_requirements_one_of(self) -> None:
++ def test_attribute_requirements_one_of_succeeds(self) -> None:
+ """Test that auth succeeds if userinfo attribute has multiple values and CONTAINS required value"""
+ # userinfo with "test": ["bar"] attribute should succeed.
+ userinfo = {
+@@ -1475,6 +1475,81 @@ class OidcHandlerTestCase(HomeserverTestCase):
+ auth_provider_session_id=None,
+ )
+
++ @override_config(
++ {
++ "oidc_config": {
++ **DEFAULT_CONFIG,
++ "attribute_requirements": [
++ {"attribute": "test", "one_of": ["foo", "bar"]}
++ ],
++ }
++ }
++ )
++ def test_attribute_requirements_one_of_fails(self) -> None:
++ """Test that auth fails if userinfo attribute has multiple values yet
++ DOES NOT CONTAIN a required value
++ """
++ # userinfo with "test": ["something else"] attribute should fail.
++ userinfo = {
++ "sub": "tester",
++ "username": "tester",
++ "test": ["something else"],
++ }
++ request, _ = self.start_authorization(userinfo)
++ self.get_success(self.handler.handle_oidc_callback(request))
++ self.complete_sso_login.assert_not_called()
++
++ @override_config(
++ {
++ "oidc_config": {
++ **DEFAULT_CONFIG,
++ "attribute_requirements": [{"attribute": "test"}],
++ }
++ }
++ )
++ def test_attribute_requirements_does_not_exist(self) -> None:
++ """OIDC login fails if the required attribute does not exist in the OIDC userinfo response."""
++ # userinfo lacking "test" attribute should fail.
++ userinfo = {
++ "sub": "tester",
++ "username": "tester",
++ }
++ request, _ = self.start_authorization(userinfo)
++ self.get_success(self.handler.handle_oidc_callback(request))
++ self.complete_sso_login.assert_not_called()
++
++ @override_config(
++ {
++ "oidc_config": {
++ **DEFAULT_CONFIG,
++ "attribute_requirements": [{"attribute": "test"}],
++ }
++ }
++ )
++ def test_attribute_requirements_exist(self) -> None:
++ """OIDC login succeeds if the required attribute exists (regardless of value)
++ in the OIDC userinfo response.
++ """
++ # userinfo with "test" attribute and random value should succeed.
++ userinfo = {
++ "sub": "tester",
++ "username": "tester",
++ "test": random_string(5), # value does not matter
++ }
++ request, _ = self.start_authorization(userinfo)
++ self.get_success(self.handler.handle_oidc_callback(request))
++
++ # check that the auth handler got called as expected
++ self.complete_sso_login.assert_called_once_with(
++ "@tester:test",
++ self.provider.idp_id,
++ request,
++ ANY,
++ None,
++ new_user=True,
++ auth_provider_session_id=None,
++ )
++
+ @override_config(
+ {
+ "oidc_config": {
+--
+2.49.0
+
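For reference, here is a minimal sketch of the requirement semantics the patch documents: `attribute` must be present; when `value` or `one_of` is given, a list-valued claim must contain a match while a scalar claim must equal one. The helper name is illustrative, not Synapse's API:

```python
# Minimal sketch (not Synapse's implementation) of the documented
# attribute_requirements semantics.
from typing import Any, Mapping, Optional, Sequence

def requirement_met(
    userinfo: Mapping[str, Any],
    attribute: str,
    value: Optional[str] = None,
    one_of: Optional[Sequence[str]] = None,
) -> bool:
    if attribute not in userinfo:
        return False
    claim = userinfo[attribute]
    values = claim if isinstance(claim, list) else [claim]
    if value is not None:
        return value in values
    if one_of is not None:
        return any(v in values for v in one_of)
    return True  # neither given: mere existence suffices

assert requirement_met({"picture": "x"}, "picture")
assert requirement_met(
    {"family_name": "Smith"}, "family_name", one_of=["Stephensson", "Smith"]
)
assert not requirement_met({}, "test")
```

Dropping the JSON-schema `oneOf` clause is what makes the existence-only form legal in the config; previously every requirement had to carry either `value` or `one_of`.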
diff --git a/packages/overlays/matrix-synapse/patches/0065-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch b/packages/overlays/matrix-synapse/patches/0065-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch
new file mode 100644
index 0000000..0e7362d
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0065-Bump-setuptools-from-72.1.0-to-78.1.1-18461.patch
@@ -0,0 +1,49 @@
+From 303c5c4daa6986a91ab4632bd4df0448199b1813 Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 20 May 2025 12:03:10 +0100
+Subject: [PATCH 65/74] Bump setuptools from 72.1.0 to 78.1.1 (#18461)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+---
+ poetry.lock | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/poetry.lock b/poetry.lock
+index 54ddad3bdd..3c53dfb376 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -2584,20 +2584,24 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"]
+
+ [[package]]
+ name = "setuptools"
+-version = "72.1.0"
++version = "78.1.1"
+ description = "Easily download, build, install, upgrade, and uninstall Python packages"
+ optional = false
+-python-versions = ">=3.8"
++python-versions = ">=3.9"
+ groups = ["main", "dev"]
+ files = [
+- {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"},
+- {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"},
++ {file = "setuptools-78.1.1-py3-none-any.whl", hash = "sha256:c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561"},
++ {file = "setuptools-78.1.1.tar.gz", hash = "sha256:fcc17fd9cd898242f6b4adfaca46137a9edef687f43e6f78469692a5e70d851d"},
+ ]
+
+ [package.extras]
+-core = ["importlib-metadata (>=6) ; python_version < \"3.10\"", "importlib-resources (>=5.10.2) ; python_version < \"3.9\"", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
+-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-ruff (<0.4) ; platform_system == \"Windows\"", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "pytest-ruff (>=0.3.2) ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
++check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
++core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
++cover = ["pytest-cov"]
++doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
++enabler = ["pytest-enabler (>=2.2)"]
++test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
++type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
+
+ [[package]]
+ name = "setuptools-rust"
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0066-Update-postgres.md-18445.patch b/packages/overlays/matrix-synapse/patches/0066-Update-postgres.md-18445.patch
new file mode 100644
index 0000000..dec69ac
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0066-Update-postgres.md-18445.patch
@@ -0,0 +1,40 @@
+From a6cb3533db77ebeb6b7ed86fb3d3dd86c046f4a4 Mon Sep 17 00:00:00 2001
+From: Strac Consulting Engineers Pty Ltd <preminik@preminik.com>
+Date: Tue, 20 May 2025 23:31:05 +1000
+Subject: [PATCH 66/74] Update postgres.md (#18445)
+
+---
+ changelog.d/18445.doc | 1 +
+ docs/postgres.md | 8 ++++++++
+ 2 files changed, 9 insertions(+)
+ create mode 100644 changelog.d/18445.doc
+
+diff --git a/changelog.d/18445.doc b/changelog.d/18445.doc
+new file mode 100644
+index 0000000000..1e05a791b2
+--- /dev/null
++++ b/changelog.d/18445.doc
+@@ -0,0 +1 @@
++Add advice for upgrading between major PostgreSQL versions to the database documentation.
+diff --git a/docs/postgres.md b/docs/postgres.md
+index 51670667e8..d51f54c722 100644
+--- a/docs/postgres.md
++++ b/docs/postgres.md
+@@ -100,6 +100,14 @@ database:
+ keepalives_count: 3
+ ```
+
++## PostgreSQL major version upgrades
++
++Postgres uses separate directories for database locations between major versions (typically `/var/lib/postgresql/<version>/main`).
++
++Therefore, it is recommended to stop Synapse and other services (MAS, etc.) before upgrading between Postgres major versions.
++
++It is also strongly recommended to [back up](./usage/administration/backups.md#database) your database beforehand to ensure no data loss arising from a failed upgrade.
++
+ ## Backups
+
+ Don't forget to [back up](./usage/administration/backups.md#database) your database!
+--
+2.49.0
+
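The upgrade advice above is straightforward to script. A rough sketch follows; the systemd unit names and backup path are hypothetical examples, and only `pg_dumpall` itself is a standard tool:

```python
# Rough automation sketch for the advice above. Unit names and the
# backup path are illustrative, not part of the patch.
import subprocess

# Stop Synapse (and companions such as MAS) before touching Postgres.
for unit in ("matrix-synapse.service",):
    subprocess.run(["systemctl", "stop", unit], check=True)

# Take a logical dump of the whole cluster before the major upgrade.
subprocess.run(
    ["pg_dumpall", "-U", "postgres", "-f", "/var/backups/pre-pg-upgrade.sql"],
    check=True,
)
```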
diff --git a/packages/overlays/matrix-synapse/patches/0067-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch b/packages/overlays/matrix-synapse/patches/0067-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch
new file mode 100644
index 0000000..ab6c95d
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0067-Bump-ruff-from-0.7.3-to-0.11.10-18451.patch
@@ -0,0 +1,1259 @@
+From 9d43bec3268d9a454fe992f25edfc013a50fb9cc Mon Sep 17 00:00:00 2001
+From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
+Date: Tue, 20 May 2025 15:23:30 +0100
+Subject: [PATCH 67/74] Bump ruff from 0.7.3 to 0.11.10 (#18451)
+
+Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+Co-authored-by: Andrew Morgan <andrew@amorgan.xyz>
+Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+---
+ changelog.d/18451.misc | 1 +
+ poetry.lock | 40 +++++++++----------
+ pyproject.toml | 2 +-
+ synapse/_scripts/synapse_port_db.py | 2 +-
+ synapse/_scripts/synctl.py | 6 +--
+ synapse/app/generic_worker.py | 3 +-
+ synapse/app/homeserver.py | 3 +-
+ synapse/config/tls.py | 3 +-
+ synapse/event_auth.py | 3 +-
+ synapse/handlers/e2e_keys.py | 12 ++----
+ synapse/handlers/federation.py | 6 +--
+ synapse/handlers/message.py | 22 +++++-----
+ synapse/handlers/sso.py | 6 +--
+ synapse/http/matrixfederationclient.py | 6 +--
+ synapse/http/proxyagent.py | 12 +++---
+ synapse/http/servlet.py | 6 +--
+ synapse/module_api/__init__.py | 6 +--
+ synapse/replication/http/_base.py | 6 +--
+ synapse/replication/tcp/streams/events.py | 6 +--
+ synapse/rest/admin/__init__.py | 3 +-
+ synapse/rest/client/receipts.py | 4 +-
+ synapse/rest/client/rendezvous.py | 6 +--
+ synapse/rest/client/transactions.py | 6 +--
+ synapse/storage/background_updates.py | 12 +++---
+ synapse/storage/controllers/persist_events.py | 3 +-
+ synapse/storage/databases/main/client_ips.py | 12 +++---
+ synapse/storage/databases/main/deviceinbox.py | 6 +--
+ synapse/storage/databases/main/devices.py | 2 +-
+ synapse/storage/databases/main/events.py | 27 ++++++-------
+ .../storage/databases/main/events_worker.py | 6 +--
+ .../databases/main/monthly_active_users.py | 24 +++++------
+ .../storage/databases/main/purge_events.py | 3 +-
+ .../storage/databases/main/state_deltas.py | 6 +--
+ synapse/storage/databases/main/tags.py | 5 +--
+ .../storage/databases/main/user_directory.py | 6 +--
+ synapse/storage/databases/state/bg_updates.py | 3 +-
+ synapse/storage/schema/main/delta/25/fts.py | 3 +-
+ synapse/storage/schema/main/delta/27/ts.py | 3 +-
+ .../schema/main/delta/31/search_update.py | 3 +-
+ .../schema/main/delta/33/event_fields.py | 3 +-
+ synapse/types/__init__.py | 3 +-
+ synapse/types/state.py | 2 +-
+ synapse/util/iterutils.py | 4 +-
+ .../test_federation_out_of_band_membership.py | 18 ++++-----
+ tests/handlers/test_user_directory.py | 4 +-
+ tests/http/test_matrixfederationclient.py | 8 +---
+ tests/media/test_media_storage.py | 4 +-
+ tests/replication/tcp/streams/test_events.py | 2 +-
+ tests/rest/admin/test_room.py | 2 +-
+ tests/rest/admin/test_user.py | 4 +-
+ .../sliding_sync/test_rooms_timeline.py | 6 +--
+ tests/rest/client/test_media.py | 2 +-
+ tests/rest/client/utils.py | 6 +--
+ tests/rest/media/test_url_preview.py | 2 +-
+ tests/server.py | 6 +--
+ tests/storage/test_base.py | 2 +-
+ tests/storage/test_devices.py | 6 +--
+ tests/storage/test_event_federation.py | 2 +-
+ tests/test_state.py | 2 +-
+ tests/test_utils/logging_setup.py | 2 +-
+ 60 files changed, 178 insertions(+), 206 deletions(-)
+ create mode 100644 changelog.d/18451.misc
+
+diff --git a/changelog.d/18451.misc b/changelog.d/18451.misc
+new file mode 100644
+index 0000000000..593e83eb7f
+--- /dev/null
++++ b/changelog.d/18451.misc
+@@ -0,0 +1 @@
++Bump ruff from 0.7.3 to 0.11.10.
+\ No newline at end of file
+diff --git a/poetry.lock b/poetry.lock
+index 3c53dfb376..ada0646215 100644
+--- a/poetry.lock
++++ b/poetry.lock
+@@ -2440,30 +2440,30 @@ files = [
+
+ [[package]]
+ name = "ruff"
+-version = "0.7.3"
++version = "0.11.10"
+ description = "An extremely fast Python linter and code formatter, written in Rust."
+ optional = false
+ python-versions = ">=3.7"
+ groups = ["dev"]
+ files = [
+- {file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
+- {file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
+- {file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
+- {file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
+- {file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
+- {file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
+- {file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
+- {file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
+- {file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
+- {file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
+- {file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
+- {file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
++ {file = "ruff-0.11.10-py3-none-linux_armv6l.whl", hash = "sha256:859a7bfa7bc8888abbea31ef8a2b411714e6a80f0d173c2a82f9041ed6b50f58"},
++ {file = "ruff-0.11.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:968220a57e09ea5e4fd48ed1c646419961a0570727c7e069842edd018ee8afed"},
++ {file = "ruff-0.11.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1067245bad978e7aa7b22f67113ecc6eb241dca0d9b696144256c3a879663bca"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4854fd09c7aed5b1590e996a81aeff0c9ff51378b084eb5a0b9cd9518e6cff2"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b4564e9f99168c0f9195a0fd5fa5928004b33b377137f978055e40008a082c5"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b6a9cc5b62c03cc1fea0044ed8576379dbaf751d5503d718c973d5418483641"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:607ecbb6f03e44c9e0a93aedacb17b4eb4f3563d00e8b474298a201622677947"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b3a522fa389402cd2137df9ddefe848f727250535c70dafa840badffb56b7a4"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f071b0deed7e9245d5820dac235cbdd4ef99d7b12ff04c330a241ad3534319f"},
++ {file = "ruff-0.11.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a60e3a0a617eafba1f2e4186d827759d65348fa53708ca547e384db28406a0b"},
++ {file = "ruff-0.11.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:da8ec977eaa4b7bf75470fb575bea2cb41a0e07c7ea9d5a0a97d13dbca697bf2"},
++ {file = "ruff-0.11.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ddf8967e08227d1bd95cc0851ef80d2ad9c7c0c5aab1eba31db49cf0a7b99523"},
++ {file = "ruff-0.11.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5a94acf798a82db188f6f36575d80609072b032105d114b0f98661e1679c9125"},
++ {file = "ruff-0.11.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3afead355f1d16d95630df28d4ba17fb2cb9c8dfac8d21ced14984121f639bad"},
++ {file = "ruff-0.11.10-py3-none-win32.whl", hash = "sha256:dc061a98d32a97211af7e7f3fa1d4ca2fcf919fb96c28f39551f35fc55bdbc19"},
++ {file = "ruff-0.11.10-py3-none-win_amd64.whl", hash = "sha256:5cc725fbb4d25b0f185cb42df07ab6b76c4489b4bfb740a175f3a59c70e8a224"},
++ {file = "ruff-0.11.10-py3-none-win_arm64.whl", hash = "sha256:ef69637b35fb8b210743926778d0e45e1bffa850a7c61e428c6b971549b5f5d1"},
++ {file = "ruff-0.11.10.tar.gz", hash = "sha256:d522fb204b4959909ecac47da02830daec102eeb100fb50ea9554818d47a5fa6"},
+ ]
+
+ [[package]]
+@@ -3394,4 +3394,4 @@ user-search = ["pyicu"]
+ [metadata]
+ lock-version = "2.1"
+ python-versions = "^3.9.0"
+-content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d"
++content-hash = "522f5bacf5610646876452e0e397038dd5c959692d2ab76214431bff78562d01"
+diff --git a/pyproject.toml b/pyproject.toml
+index 914a5804aa..6ce05805a9 100644
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -320,7 +320,7 @@ all = [
+ # failing on new releases. Keeping lower bounds loose here means that dependabot
+ # can bump versions without having to update the content-hash in the lockfile.
+ # This helps prevents merge conflicts when running a batch of dependabot updates.
+-ruff = "0.7.3"
++ruff = "0.11.10"
+ # Type checking only works with the pydantic.v1 compat module from pydantic v2
+ pydantic = "^2"
+
+diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
+index 438b2ff8a0..573c70696e 100755
+--- a/synapse/_scripts/synapse_port_db.py
++++ b/synapse/_scripts/synapse_port_db.py
+@@ -1065,7 +1065,7 @@ class Porter:
+
+ def get_sent_table_size(txn: LoggingTransaction) -> int:
+ txn.execute(
+- "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
++ "SELECT count(*) FROM sent_transactions WHERE ts >= ?", (yesterday,)
+ )
+ result = txn.fetchone()
+ assert result is not None
+diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py
+index 688df9485c..2e2aa27a17 100755
+--- a/synapse/_scripts/synctl.py
++++ b/synapse/_scripts/synctl.py
+@@ -292,9 +292,9 @@ def main() -> None:
+ for key in worker_config:
+ if key == "worker_app": # But we allow worker_app
+ continue
+- assert not key.startswith(
+- "worker_"
+- ), "Main process cannot use worker_* config"
++ assert not key.startswith("worker_"), (
++ "Main process cannot use worker_* config"
++ )
+ else:
+ worker_pidfile = worker_config["worker_pid_file"]
+ worker_cache_factor = worker_config.get("synctl_cache_factor")
+diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
+index f495d5b7e4..75c65ccc0d 100644
+--- a/synapse/app/generic_worker.py
++++ b/synapse/app/generic_worker.py
+@@ -287,8 +287,7 @@ class GenericWorkerServer(HomeServer):
+ elif listener.type == "metrics":
+ if not self.config.metrics.enable_metrics:
+ logger.warning(
+- "Metrics listener configured, but "
+- "enable_metrics is not True!"
++ "Metrics listener configured, but enable_metrics is not True!"
+ )
+ else:
+ if isinstance(listener, TCPListenerConfig):
+diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
+index 6da2194cf7..e027b5eaea 100644
+--- a/synapse/app/homeserver.py
++++ b/synapse/app/homeserver.py
+@@ -289,8 +289,7 @@ class SynapseHomeServer(HomeServer):
+ elif listener.type == "metrics":
+ if not self.config.metrics.enable_metrics:
+ logger.warning(
+- "Metrics listener configured, but "
+- "enable_metrics is not True!"
++ "Metrics listener configured, but enable_metrics is not True!"
+ )
+ else:
+ if isinstance(listener, TCPListenerConfig):
+diff --git a/synapse/config/tls.py b/synapse/config/tls.py
+index 51dc15eb61..a48d81fdc3 100644
+--- a/synapse/config/tls.py
++++ b/synapse/config/tls.py
+@@ -108,8 +108,7 @@ class TlsConfig(Config):
+ # Raise an error if this option has been specified without any
+ # corresponding certificates.
+ raise ConfigError(
+- "federation_custom_ca_list specified without "
+- "any certificate files"
++ "federation_custom_ca_list specified without any certificate files"
+ )
+
+ certs = []
+diff --git a/synapse/event_auth.py b/synapse/event_auth.py
+index 5ecf493f98..5999c264dc 100644
+--- a/synapse/event_auth.py
++++ b/synapse/event_auth.py
+@@ -986,8 +986,7 @@ def _check_power_levels(
+ if old_level == user_level:
+ raise AuthError(
+ 403,
+- "You don't have permission to remove ops level equal "
+- "to your own",
++ "You don't have permission to remove ops level equal to your own",
+ )
+
+ # Check if the old and new levels are greater than the user level
+diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
+index 540995e062..f2b2e30bf4 100644
+--- a/synapse/handlers/e2e_keys.py
++++ b/synapse/handlers/e2e_keys.py
+@@ -1163,7 +1163,7 @@ class E2eKeysHandler:
+ devices = devices[user_id]
+ except SynapseError as e:
+ failure = _exception_to_failure(e)
+- failures[user_id] = {device: failure for device in signatures.keys()}
++ failures[user_id] = dict.fromkeys(signatures.keys(), failure)
+ return signature_list, failures
+
+ for device_id, device in signatures.items():
+@@ -1303,7 +1303,7 @@ class E2eKeysHandler:
+ except SynapseError as e:
+ failure = _exception_to_failure(e)
+ for user, devicemap in signatures.items():
+- failures[user] = {device_id: failure for device_id in devicemap.keys()}
++ failures[user] = dict.fromkeys(devicemap.keys(), failure)
+ return signature_list, failures
+
+ for target_user, devicemap in signatures.items():
+@@ -1344,9 +1344,7 @@ class E2eKeysHandler:
+ # other devices were signed -- mark those as failures
+ logger.debug("upload signature: too many devices specified")
+ failure = _exception_to_failure(NotFoundError("Unknown device"))
+- failures[target_user] = {
+- device: failure for device in other_devices
+- }
++ failures[target_user] = dict.fromkeys(other_devices, failure)
+
+ if user_signing_key_id in master_key.get("signatures", {}).get(
+ user_id, {}
+@@ -1367,9 +1365,7 @@ class E2eKeysHandler:
+ except SynapseError as e:
+ failure = _exception_to_failure(e)
+ if device_id is None:
+- failures[target_user] = {
+- device_id: failure for device_id in devicemap.keys()
+- }
++ failures[target_user] = dict.fromkeys(devicemap.keys(), failure)
+ else:
+ failures.setdefault(target_user, {})[device_id] = failure
+
+diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
+index 17dd4af13e..b1640e3246 100644
+--- a/synapse/handlers/federation.py
++++ b/synapse/handlers/federation.py
+@@ -1312,9 +1312,9 @@ class FederationHandler:
+ if state_key is not None:
+ # the event was not rejected (get_event raises a NotFoundError for rejected
+ # events) so the state at the event should include the event itself.
+- assert (
+- state_map.get((event.type, state_key)) == event.event_id
+- ), "State at event did not include event itself"
++ assert state_map.get((event.type, state_key)) == event.event_id, (
++ "State at event did not include event itself"
++ )
+
+ # ... but we need the state *before* that event
+ if "replaces_state" in event.unsigned:
+diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
+index 52c61cfa54..ff6eb5a514 100644
+--- a/synapse/handlers/message.py
++++ b/synapse/handlers/message.py
+@@ -143,9 +143,9 @@ class MessageHandler:
+ elif membership == Membership.LEAVE:
+ key = (event_type, state_key)
+ # If the membership is not JOIN, then the event ID should exist.
+- assert (
+- membership_event_id is not None
+- ), "check_user_in_room_or_world_readable returned invalid data"
++ assert membership_event_id is not None, (
++ "check_user_in_room_or_world_readable returned invalid data"
++ )
+ room_state = await self._state_storage_controller.get_state_for_events(
+ [membership_event_id], StateFilter.from_types([key])
+ )
+@@ -242,9 +242,9 @@ class MessageHandler:
+ room_state = await self.store.get_events(state_ids.values())
+ elif membership == Membership.LEAVE:
+ # If the membership is not JOIN, then the event ID should exist.
+- assert (
+- membership_event_id is not None
+- ), "check_user_in_room_or_world_readable returned invalid data"
++ assert membership_event_id is not None, (
++ "check_user_in_room_or_world_readable returned invalid data"
++ )
+ room_state_events = (
+ await self._state_storage_controller.get_state_for_events(
+ [membership_event_id], state_filter=state_filter
+@@ -1266,12 +1266,14 @@ class EventCreationHandler:
+ # Allow an event to have empty list of prev_event_ids
+ # only if it has auth_event_ids.
+ or auth_event_ids
+- ), "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
++ ), (
++ "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
++ )
+ else:
+ # we now ought to have some prev_events (unless it's a create event).
+- assert (
+- builder.type == EventTypes.Create or prev_event_ids
+- ), "Attempting to create a non-m.room.create event with no prev_events"
++ assert builder.type == EventTypes.Create or prev_event_ids, (
++ "Attempting to create a non-m.room.create event with no prev_events"
++ )
+
+ if for_batch:
+ assert prev_event_ids is not None
+diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
+index 9c0d665461..07827cf95b 100644
+--- a/synapse/handlers/sso.py
++++ b/synapse/handlers/sso.py
+@@ -1192,9 +1192,9 @@ class SsoHandler:
+ """
+
+ # It is expected that this is the main process.
+- assert isinstance(
+- self._device_handler, DeviceHandler
+- ), "revoking SSO sessions can only be called on the main process"
++ assert isinstance(self._device_handler, DeviceHandler), (
++ "revoking SSO sessions can only be called on the main process"
++ )
+
+ # Invalidate any running user-mapping sessions
+ to_delete = []
+diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
+index f6d2536957..88bf98045c 100644
+--- a/synapse/http/matrixfederationclient.py
++++ b/synapse/http/matrixfederationclient.py
+@@ -425,9 +425,9 @@ class MatrixFederationHttpClient:
+ )
+ else:
+ proxy_authorization_secret = hs.config.worker.worker_replication_secret
+- assert (
+- proxy_authorization_secret is not None
+- ), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
++ assert proxy_authorization_secret is not None, (
++ "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
++ )
+ federation_proxy_credentials = BearerProxyCredentials(
+ proxy_authorization_secret.encode("ascii")
+ )
+diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
+index fd16ee42dd..6817199035 100644
+--- a/synapse/http/proxyagent.py
++++ b/synapse/http/proxyagent.py
+@@ -173,9 +173,9 @@ class ProxyAgent(_AgentBase):
+ self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
+ self._federation_proxy_credentials: Optional[ProxyCredentials] = None
+ if federation_proxy_locations:
+- assert (
+- federation_proxy_credentials is not None
+- ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
++ assert federation_proxy_credentials is not None, (
++ "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
++ )
+
+ endpoints: List[IStreamClientEndpoint] = []
+ for federation_proxy_location in federation_proxy_locations:
+@@ -302,9 +302,9 @@ class ProxyAgent(_AgentBase):
+ parsed_uri.scheme == b"matrix-federation"
+ and self._federation_proxy_endpoint
+ ):
+- assert (
+- self._federation_proxy_credentials is not None
+- ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
++ assert self._federation_proxy_credentials is not None, (
++ "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
++ )
+
+ # Set a Proxy-Authorization header
+ if headers is None:
+diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
+index ed6ab08336..47d8bd5eaf 100644
+--- a/synapse/http/servlet.py
++++ b/synapse/http/servlet.py
+@@ -582,9 +582,9 @@ def parse_enum(
+ is not one of those allowed values.
+ """
+ # Assert the enum values are strings.
+- assert all(
+- isinstance(e.value, str) for e in E
+- ), "parse_enum only works with string values"
++ assert all(isinstance(e.value, str) for e in E), (
++ "parse_enum only works with string values"
++ )
+ str_value = parse_string(
+ request,
+ name,
+diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
+index bf9532e891..7834da759c 100644
+--- a/synapse/module_api/__init__.py
++++ b/synapse/module_api/__init__.py
+@@ -894,9 +894,9 @@ class ModuleApi:
+ Raises:
+ synapse.api.errors.AuthError: the access token is invalid
+ """
+- assert isinstance(
+- self._device_handler, DeviceHandler
+- ), "invalidate_access_token can only be called on the main process"
++ assert isinstance(self._device_handler, DeviceHandler), (
++ "invalidate_access_token can only be called on the main process"
++ )
+
+ # see if the access token corresponds to a device
+ user_info = yield defer.ensureDeferred(
+diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
+index 9aa8d90bfe..0002538680 100644
+--- a/synapse/replication/http/_base.py
++++ b/synapse/replication/http/_base.py
+@@ -128,9 +128,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
+
+ # We reserve `instance_name` as a parameter to sending requests, so we
+ # assert here that sub classes don't try and use the name.
+- assert (
+- "instance_name" not in self.PATH_ARGS
+- ), "`instance_name` is a reserved parameter name"
++ assert "instance_name" not in self.PATH_ARGS, (
++ "`instance_name` is a reserved parameter name"
++ )
+ assert (
+ "instance_name"
+ not in signature(self.__class__._serialize_payload).parameters
+diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
+index ea0803dfc2..05b55fb033 100644
+--- a/synapse/replication/tcp/streams/events.py
++++ b/synapse/replication/tcp/streams/events.py
+@@ -200,9 +200,9 @@ class EventsStream(_StreamFromIdGen):
+
+ # we rely on get_all_new_forward_event_rows strictly honouring the limit, so
+ # that we know it is safe to just take upper_limit = event_rows[-1][0].
+- assert (
+- len(event_rows) <= target_row_count
+- ), "get_all_new_forward_event_rows did not honour row limit"
++ assert len(event_rows) <= target_row_count, (
++ "get_all_new_forward_event_rows did not honour row limit"
++ )
+
+ # if we hit the limit on event_updates, there's no point in going beyond the
+ # last stream_id in the batch for the other sources.
+diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
+index b1335fed66..e55cdc0470 100644
+--- a/synapse/rest/admin/__init__.py
++++ b/synapse/rest/admin/__init__.py
+@@ -207,8 +207,7 @@ class PurgeHistoryRestServlet(RestServlet):
+ (stream, topo, _event_id) = r
+ token = "t%d-%d" % (topo, stream)
+ logger.info(
+- "[purge] purging up to token %s (received_ts %i => "
+- "stream_ordering %i)",
++ "[purge] purging up to token %s (received_ts %i => stream_ordering %i)",
+ token,
+ ts,
+ stream_ordering,
+diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
+index 89203dc45a..4bf93f485c 100644
+--- a/synapse/rest/client/receipts.py
++++ b/synapse/rest/client/receipts.py
+@@ -39,9 +39,7 @@ logger = logging.getLogger(__name__)
+
+ class ReceiptRestServlet(RestServlet):
+ PATTERNS = client_patterns(
+- "/rooms/(?P<room_id>[^/]*)"
+- "/receipt/(?P<receipt_type>[^/]*)"
+- "/(?P<event_id>[^/]*)$"
++ "/rooms/(?P<room_id>[^/]*)/receipt/(?P<receipt_type>[^/]*)/(?P<event_id>[^/]*)$"
+ )
+ CATEGORY = "Receipts requests"
+
+diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py
+index 02f166b4ea..a1808847f0 100644
+--- a/synapse/rest/client/rendezvous.py
++++ b/synapse/rest/client/rendezvous.py
+@@ -44,9 +44,9 @@ class MSC4108DelegationRendezvousServlet(RestServlet):
+ redirection_target: Optional[str] = (
+ hs.config.experimental.msc4108_delegation_endpoint
+ )
+- assert (
+- redirection_target is not None
+- ), "Servlet is only registered if there is a delegation target"
++ assert redirection_target is not None, (
++ "Servlet is only registered if there is a delegation target"
++ )
+ self.endpoint = redirection_target.encode("utf-8")
+
+ async def on_POST(self, request: SynapseRequest) -> None:
+diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
+index f791904168..1a57996aec 100644
+--- a/synapse/rest/client/transactions.py
++++ b/synapse/rest/client/transactions.py
+@@ -94,9 +94,9 @@ class HttpTransactionCache:
+ # (appservice and guest users), but does not cover access tokens minted
+ # by the admin API. Use the access token ID instead.
+ else:
+- assert (
+- requester.access_token_id is not None
+- ), "Requester must have an access_token_id"
++ assert requester.access_token_id is not None, (
++ "Requester must have an access_token_id"
++ )
+ return (path, "user_admin", requester.access_token_id)
+
+ def fetch_or_execute_request(
+diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
+index a02b4cc9ce..d170bbddaa 100644
+--- a/synapse/storage/background_updates.py
++++ b/synapse/storage/background_updates.py
+@@ -739,9 +739,9 @@ class BackgroundUpdater:
+ c.execute(sql)
+
+ async def updater(progress: JsonDict, batch_size: int) -> int:
+- assert isinstance(
+- self.db_pool.engine, engines.PostgresEngine
+- ), "validate constraint background update registered for non-Postres database"
++ assert isinstance(self.db_pool.engine, engines.PostgresEngine), (
++ "validate constraint background update registered for non-Postres database"
++ )
+
+ logger.info("Validating constraint %s to %s", constraint_name, table)
+ await self.db_pool.runWithConnection(runner)
+@@ -900,9 +900,9 @@ class BackgroundUpdater:
+ on the table. Used to iterate over the table.
+ """
+
+- assert isinstance(
+- self.db_pool.engine, engines.PostgresEngine
+- ), "validate constraint background update registered for non-Postres database"
++ assert isinstance(self.db_pool.engine, engines.PostgresEngine), (
++ "validate constraint background update registered for non-Postres database"
++ )
+
+ async def updater(progress: JsonDict, batch_size: int) -> int:
+ return await self.validate_constraint_and_delete_in_background(
+diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
+index 7963905479..f5131fe291 100644
+--- a/synapse/storage/controllers/persist_events.py
++++ b/synapse/storage/controllers/persist_events.py
+@@ -870,8 +870,7 @@ class EventsPersistenceStorageController:
+ # This should only happen for outlier events.
+ if not ev.internal_metadata.is_outlier():
+ raise Exception(
+- "Context for new event %s has no state "
+- "group" % (ev.event_id,)
++ "Context for new event %s has no state group" % (ev.event_id,)
+ )
+ continue
+ if ctx.state_group_deltas:
+diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
+index e8c322ab5c..69008804bd 100644
+--- a/synapse/storage/databases/main/client_ips.py
++++ b/synapse/storage/databases/main/client_ips.py
+@@ -650,9 +650,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
+
+ @wrap_as_background_process("update_client_ips")
+ async def _update_client_ips_batch(self) -> None:
+- assert (
+- self._update_on_this_worker
+- ), "This worker is not designated to update client IPs"
++ assert self._update_on_this_worker, (
++ "This worker is not designated to update client IPs"
++ )
+
+ # If the DB pool has already terminated, don't try updating
+ if not self.db_pool.is_running():
+@@ -671,9 +671,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
+ txn: LoggingTransaction,
+ to_update: Mapping[Tuple[str, str, str], Tuple[str, Optional[str], int]],
+ ) -> None:
+- assert (
+- self._update_on_this_worker
+- ), "This worker is not designated to update client IPs"
++ assert self._update_on_this_worker, (
++ "This worker is not designated to update client IPs"
++ )
+
+ # Keys and values for the `user_ips` upsert.
+ user_ips_keys = []
+diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
+index 0612b82b9b..d47833655d 100644
+--- a/synapse/storage/databases/main/deviceinbox.py
++++ b/synapse/storage/databases/main/deviceinbox.py
+@@ -200,9 +200,9 @@ class DeviceInboxWorkerStore(SQLBaseStore):
+ to_stream_id=to_stream_id,
+ )
+
+- assert (
+- last_processed_stream_id == to_stream_id
+- ), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
++ assert last_processed_stream_id == to_stream_id, (
++ "Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
++ )
+
+ return user_id_device_id_to_messages
+
+diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
+index 3f0b2f5d84..6191f22cd6 100644
+--- a/synapse/storage/databases/main/devices.py
++++ b/synapse/storage/databases/main/devices.py
+@@ -1092,7 +1092,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
+ ),
+ )
+
+- results: Dict[str, Optional[str]] = {user_id: None for user_id in user_ids}
++ results: Dict[str, Optional[str]] = dict.fromkeys(user_ids)
+ results.update(rows)
+
+ return results
+diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
+index 26fbc1a483..b7cc0433e7 100644
+--- a/synapse/storage/databases/main/events.py
++++ b/synapse/storage/databases/main/events.py
+@@ -246,9 +246,9 @@ class PersistEventsStore:
+ self.is_mine_id = hs.is_mine_id
+
+ # This should only exist on instances that are configured to write
+- assert (
+- hs.get_instance_name() in hs.config.worker.writers.events
+- ), "Can only instantiate EventsStore on master"
++ assert hs.get_instance_name() in hs.config.worker.writers.events, (
++ "Can only instantiate EventsStore on master"
++ )
+
+ # Since we have been configured to write, we ought to have id generators,
+ # rather than id trackers.
+@@ -465,9 +465,9 @@ class PersistEventsStore:
+ missing_membership_event_ids
+ )
+ # There shouldn't be any missing events
+- assert (
+- remaining_events.keys() == missing_membership_event_ids
+- ), missing_membership_event_ids.difference(remaining_events.keys())
++ assert remaining_events.keys() == missing_membership_event_ids, (
++ missing_membership_event_ids.difference(remaining_events.keys())
++ )
+ membership_event_map.update(remaining_events)
+
+ for (
+@@ -534,9 +534,9 @@ class PersistEventsStore:
+ missing_state_event_ids
+ )
+ # There shouldn't be any missing events
+- assert (
+- remaining_events.keys() == missing_state_event_ids
+- ), missing_state_event_ids.difference(remaining_events.keys())
++ assert remaining_events.keys() == missing_state_event_ids, (
++ missing_state_event_ids.difference(remaining_events.keys())
++ )
+ for event in remaining_events.values():
+ current_state_map[(event.type, event.state_key)] = event
+
+@@ -644,9 +644,9 @@ class PersistEventsStore:
+ if missing_event_ids:
+ remaining_events = await self.store.get_events(missing_event_ids)
+ # There shouldn't be any missing events
+- assert (
+- remaining_events.keys() == missing_event_ids
+- ), missing_event_ids.difference(remaining_events.keys())
++ assert remaining_events.keys() == missing_event_ids, (
++ missing_event_ids.difference(remaining_events.keys())
++ )
+ for event in remaining_events.values():
+ current_state_map[(event.type, event.state_key)] = event
+
+@@ -3448,8 +3448,7 @@ class PersistEventsStore:
+ # Delete all these events that we've already fetched and now know that their
+ # prev events are the new backwards extremeties.
+ query = (
+- "DELETE FROM event_backward_extremities"
+- " WHERE event_id = ? AND room_id = ?"
++ "DELETE FROM event_backward_extremities WHERE event_id = ? AND room_id = ?"
+ )
+ backward_extremity_tuples_to_remove = [
+ (ev.event_id, ev.room_id)
+diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
+index 82b2ad4408..3db4460f57 100644
+--- a/synapse/storage/databases/main/events_worker.py
++++ b/synapse/storage/databases/main/events_worker.py
+@@ -824,9 +824,9 @@ class EventsWorkerStore(SQLBaseStore):
+
+ if missing_events_ids:
+
+- async def get_missing_events_from_cache_or_db() -> (
+- Dict[str, EventCacheEntry]
+- ):
++ async def get_missing_events_from_cache_or_db() -> Dict[
++ str, EventCacheEntry
++ ]:
+ """Fetches the events in `missing_event_ids` from the database.
+
+ Also creates entries in `self._current_event_fetches` to allow
+diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
+index 8e948c5e8d..659ee13d71 100644
+--- a/synapse/storage/databases/main/monthly_active_users.py
++++ b/synapse/storage/databases/main/monthly_active_users.py
+@@ -304,9 +304,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
+ txn:
+ threepids: List of threepid dicts to reserve
+ """
+- assert (
+- self._update_on_this_worker
+- ), "This worker is not designated to update MAUs"
++ assert self._update_on_this_worker, (
++ "This worker is not designated to update MAUs"
++ )
+
+ # XXX what is this function trying to achieve? It upserts into
+ # monthly_active_users for each *registered* reserved mau user, but why?
+@@ -340,9 +340,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
+ Args:
+ user_id: user to add/update
+ """
+- assert (
+- self._update_on_this_worker
+- ), "This worker is not designated to update MAUs"
++ assert self._update_on_this_worker, (
++ "This worker is not designated to update MAUs"
++ )
+
+ # Support user never to be included in MAU stats. Note I can't easily call this
+ # from upsert_monthly_active_user_txn because then I need a _txn form of
+@@ -379,9 +379,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
+ txn:
+ user_id: user to add/update
+ """
+- assert (
+- self._update_on_this_worker
+- ), "This worker is not designated to update MAUs"
++ assert self._update_on_this_worker, (
++ "This worker is not designated to update MAUs"
++ )
+
+ # Am consciously deciding to lock the table on the basis that is ought
+ # never be a big table and alternative approaches (batching multiple
+@@ -409,9 +409,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
+ Args:
+ user_id: the user_id to query
+ """
+- assert (
+- self._update_on_this_worker
+- ), "This worker is not designated to update MAUs"
++ assert self._update_on_this_worker, (
++ "This worker is not designated to update MAUs"
++ )
+
+ if self._limit_usage_by_mau or self._mau_stats_only:
+ # Trial users and guests should not be included as part of MAU group
+diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
+index ebdeb8fbd7..a11f522f03 100644
+--- a/synapse/storage/databases/main/purge_events.py
++++ b/synapse/storage/databases/main/purge_events.py
+@@ -199,8 +199,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
+
+ # Update backward extremeties
+ txn.execute_batch(
+- "INSERT INTO event_backward_extremities (room_id, event_id)"
+- " VALUES (?, ?)",
++ "INSERT INTO event_backward_extremities (room_id, event_id) VALUES (?, ?)",
+ [(room_id, event_id) for (event_id,) in new_backwards_extrems],
+ )
+
+diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
+index b90f667da8..00f87cc3a1 100644
+--- a/synapse/storage/databases/main/state_deltas.py
++++ b/synapse/storage/databases/main/state_deltas.py
+@@ -98,9 +98,9 @@ class StateDeltasStore(SQLBaseStore):
+ prev_stream_id = int(prev_stream_id)
+
+ # check we're not going backwards
+- assert (
+- prev_stream_id <= max_stream_id
+- ), f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}"
++ assert prev_stream_id <= max_stream_id, (
++ f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}"
++ )
+
+ if not self._curr_state_delta_stream_cache.has_any_entity_changed(
+ prev_stream_id
+diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py
+index 44f395f315..97b190bccc 100644
+--- a/synapse/storage/databases/main/tags.py
++++ b/synapse/storage/databases/main/tags.py
+@@ -274,10 +274,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
+ assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
+
+ def remove_tag_txn(txn: LoggingTransaction, next_id: int) -> None:
+- sql = (
+- "DELETE FROM room_tags "
+- " WHERE user_id = ? AND room_id = ? AND tag = ?"
+- )
++ sql = "DELETE FROM room_tags WHERE user_id = ? AND room_id = ? AND tag = ?"
+ txn.execute(sql, (user_id, room_id, tag))
+ self._update_revision_txn(txn, user_id, room_id, next_id)
+
+diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
+index 391f0dd638..2b867cdb6e 100644
+--- a/synapse/storage/databases/main/user_directory.py
++++ b/synapse/storage/databases/main/user_directory.py
+@@ -582,9 +582,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
+ retry_counter: number of failures in refreshing the profile so far. Used for
+ exponential backoff calculations.
+ """
+- assert not self.hs.is_mine_id(
+- user_id
+- ), "Can't mark a local user as a stale remote user."
++ assert not self.hs.is_mine_id(user_id), (
++ "Can't mark a local user as a stale remote user."
++ )
+
+ server_name = UserID.from_string(user_id).domain
+
+diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
+index 95fd0ae73a..5b594fe8dd 100644
+--- a/synapse/storage/databases/state/bg_updates.py
++++ b/synapse/storage/databases/state/bg_updates.py
+@@ -396,8 +396,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
+ return True, count
+
+ txn.execute(
+- "SELECT state_group FROM state_group_edges"
+- " WHERE state_group = ?",
++ "SELECT state_group FROM state_group_edges WHERE state_group = ?",
+ (state_group,),
+ )
+
+diff --git a/synapse/storage/schema/main/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py
+index b050cc16a7..c01c1325cb 100644
+--- a/synapse/storage/schema/main/delta/25/fts.py
++++ b/synapse/storage/schema/main/delta/25/fts.py
+@@ -75,8 +75,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
+ progress_json = json.dumps(progress)
+
+ sql = (
+- "INSERT into background_updates (update_name, progress_json)"
+- " VALUES (?, ?)"
++ "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
+ )
+
+ cur.execute(sql, ("event_search", progress_json))
+diff --git a/synapse/storage/schema/main/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py
+index d7f360b6e6..e6e73e1b77 100644
+--- a/synapse/storage/schema/main/delta/27/ts.py
++++ b/synapse/storage/schema/main/delta/27/ts.py
+@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
+ progress_json = json.dumps(progress)
+
+ sql = (
+- "INSERT into background_updates (update_name, progress_json)"
+- " VALUES (?, ?)"
++ "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
+ )
+
+ cur.execute(sql, ("event_origin_server_ts", progress_json))
+diff --git a/synapse/storage/schema/main/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py
+index 0e65c9a841..46355122bb 100644
+--- a/synapse/storage/schema/main/delta/31/search_update.py
++++ b/synapse/storage/schema/main/delta/31/search_update.py
+@@ -59,8 +59,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
+ progress_json = json.dumps(progress)
+
+ sql = (
+- "INSERT into background_updates (update_name, progress_json)"
+- " VALUES (?, ?)"
++ "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
+ )
+
+ cur.execute(sql, ("event_search_order", progress_json))
+diff --git a/synapse/storage/schema/main/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py
+index 9c02aeda88..53d215337e 100644
+--- a/synapse/storage/schema/main/delta/33/event_fields.py
++++ b/synapse/storage/schema/main/delta/33/event_fields.py
+@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
+ progress_json = json.dumps(progress)
+
+ sql = (
+- "INSERT into background_updates (update_name, progress_json)"
+- " VALUES (?, ?)"
++ "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
+ )
+
+ cur.execute(sql, ("event_fields_sender_url", progress_json))
+diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
+index e9cdd19868..5549f3c9f8 100644
+--- a/synapse/types/__init__.py
++++ b/synapse/types/__init__.py
+@@ -889,8 +889,7 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
+ def __str__(self) -> str:
+ instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
+ return (
+- f"MultiWriterStreamToken(stream: {self.stream}, "
+- f"instances: {{{instances}}})"
++ f"MultiWriterStreamToken(stream: {self.stream}, instances: {{{instances}}})"
+ )
+
+
+diff --git a/synapse/types/state.py b/synapse/types/state.py
+index e641215f18..6420e050a5 100644
+--- a/synapse/types/state.py
++++ b/synapse/types/state.py
+@@ -462,7 +462,7 @@ class StateFilter:
+ new_types.update({state_type: set() for state_type in minus_wildcards})
+
+ # insert the plus wildcards
+- new_types.update({state_type: None for state_type in plus_wildcards})
++ new_types.update(dict.fromkeys(plus_wildcards))
+
+ # insert the specific state keys
+ for state_type, state_key in plus_state_keys:
+diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
+index ff6adeb716..0a6a30aab2 100644
+--- a/synapse/util/iterutils.py
++++ b/synapse/util/iterutils.py
+@@ -114,7 +114,7 @@ def sorted_topologically(
+
+ # This is implemented by Kahn's algorithm.
+
+- degree_map = {node: 0 for node in nodes}
++ degree_map = dict.fromkeys(nodes, 0)
+ reverse_graph: Dict[T, Set[T]] = {}
+
+ for node, edges in graph.items():
+@@ -164,7 +164,7 @@ def sorted_topologically_batched(
+ persisted.
+ """
+
+- degree_map = {node: 0 for node in nodes}
++ degree_map = dict.fromkeys(nodes, 0)
+ reverse_graph: Dict[T, Set[T]] = {}
+
+ for node, edges in graph.items():
+diff --git a/tests/federation/test_federation_out_of_band_membership.py b/tests/federation/test_federation_out_of_band_membership.py
+index a4a266cf06..f77b8fe300 100644
+--- a/tests/federation/test_federation_out_of_band_membership.py
++++ b/tests/federation/test_federation_out_of_band_membership.py
+@@ -65,20 +65,20 @@ def required_state_json_to_state_map(required_state: Any) -> StateMap[EventBase]
+ if isinstance(required_state, list):
+ for state_event_dict in required_state:
+ # Yell because we're in a test and this is unexpected
+- assert isinstance(
+- state_event_dict, dict
+- ), "`required_state` should be a list of event dicts"
++ assert isinstance(state_event_dict, dict), (
++ "`required_state` should be a list of event dicts"
++ )
+
+ event_type = state_event_dict["type"]
+ event_state_key = state_event_dict["state_key"]
+
+ # Yell because we're in a test and this is unexpected
+- assert isinstance(
+- event_type, str
+- ), "Each event in `required_state` should have a string `type`"
+- assert isinstance(
+- event_state_key, str
+- ), "Each event in `required_state` should have a string `state_key`"
++ assert isinstance(event_type, str), (
++ "Each event in `required_state` should have a string `type`"
++ )
++ assert isinstance(event_state_key, str), (
++ "Each event in `required_state` should have a string `state_key`"
++ )
+
+ state_map[(event_type, event_state_key)] = make_event_from_dict(
+ state_event_dict
+diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
+index a9e9d7d7ea..b12ffc3665 100644
+--- a/tests/handlers/test_user_directory.py
++++ b/tests/handlers/test_user_directory.py
+@@ -1178,10 +1178,10 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
+ for use_numeric in [False, True]:
+ if use_numeric:
+ prefix1 = f"{i}"
+- prefix2 = f"{i+1}"
++ prefix2 = f"{i + 1}"
+ else:
+ prefix1 = f"a{i}"
+- prefix2 = f"a{i+1}"
++ prefix2 = f"a{i + 1}"
+
+ local_user_1 = self.register_user(f"user{char}{prefix1}", "password")
+ local_user_2 = self.register_user(f"user{char}{prefix2}", "password")
+diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py
+index e34df54e13..d5ebf10eac 100644
+--- a/tests/http/test_matrixfederationclient.py
++++ b/tests/http/test_matrixfederationclient.py
+@@ -436,8 +436,7 @@ class FederationClientTests(HomeserverTestCase):
+
+ # Send it the HTTP response
+ client.dataReceived(
+- b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
+- b"Server: Fake\r\n\r\n"
++ b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nServer: Fake\r\n\r\n"
+ )
+
+ # Push by enough to time it out
+@@ -691,10 +690,7 @@ class FederationClientTests(HomeserverTestCase):
+
+ # Send it a huge HTTP response
+ protocol.dataReceived(
+- b"HTTP/1.1 200 OK\r\n"
+- b"Server: Fake\r\n"
+- b"Content-Type: application/json\r\n"
+- b"\r\n"
++ b"HTTP/1.1 200 OK\r\nServer: Fake\r\nContent-Type: application/json\r\n\r\n"
+ )
+
+ self.pump()
+diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
+index 35e16a99ba..31dc32d67e 100644
+--- a/tests/media/test_media_storage.py
++++ b/tests/media/test_media_storage.py
+@@ -250,9 +250,7 @@ small_cmyk_jpeg = TestImage(
+ )
+
+ small_lossless_webp = TestImage(
+- unhexlify(
+- b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"
+- ),
++ unhexlify(b"524946461a000000574542505650384c0d0000002f00000010071011118888fe0700"),
+ b"image/webp",
+ b".webp",
+ )
+diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
+index fdc74efb5a..2a0189a4e1 100644
+--- a/tests/replication/tcp/streams/test_events.py
++++ b/tests/replication/tcp/streams/test_events.py
+@@ -324,7 +324,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
+ pls = self.helper.get_state(
+ self.room_id, EventTypes.PowerLevels, tok=self.user_tok
+ )
+- pls["users"].update({u: 50 for u in user_ids})
++ pls["users"].update(dict.fromkeys(user_ids, 50))
+ self.helper.send_state(
+ self.room_id,
+ EventTypes.PowerLevels,
+diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
+index 1d44106bd7..165d175ab2 100644
+--- a/tests/rest/admin/test_room.py
++++ b/tests/rest/admin/test_room.py
+@@ -1312,7 +1312,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
+ # Check that response json body contains a "rooms" key
+ self.assertTrue(
+ "rooms" in channel.json_body,
+- msg="Response body does not " "contain a 'rooms' key",
++ msg="Response body does not contain a 'rooms' key",
+ )
+
+ # Check that 3 rooms were returned
+diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
+index 874c29c935..f09f66da00 100644
+--- a/tests/rest/admin/test_user.py
++++ b/tests/rest/admin/test_user.py
+@@ -3901,9 +3901,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
+ image_data1 = SMALL_PNG
+ # Resolution: 1×1, MIME type: image/gif, Extension: gif, Size: 35 B
+ image_data2 = unhexlify(
+- b"47494638376101000100800100000000"
+- b"ffffff2c00000000010001000002024c"
+- b"01003b"
++ b"47494638376101000100800100000000ffffff2c00000000010001000002024c01003b"
+ )
+ # Resolution: 1×1, MIME type: image/bmp, Extension: bmp, Size: 54 B
+ image_data3 = unhexlify(
+diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py
+index 2293994793..535420209b 100644
+--- a/tests/rest/client/sliding_sync/test_rooms_timeline.py
++++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py
+@@ -309,8 +309,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
+ self.assertEqual(
+ response_body["rooms"][room_id1]["limited"],
+ False,
+- f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
+- + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
++ f"Our `timeline_limit` was {sync_body['lists']['foo-list']['timeline_limit']} "
++ + f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. "
+ + str(response_body["rooms"][room_id1]),
+ )
+ # Check to make sure the latest events are returned
+@@ -387,7 +387,7 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
+ response_body["rooms"][room_id1]["limited"],
+ True,
+ f"Our `timeline_limit` was {timeline_limit} "
+- + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
++ + f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. "
+ + str(response_body["rooms"][room_id1]),
+ )
+ # Check to make sure that the "live" and historical events are returned
+diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
+index 1ea2a5c884..9ad8ecf1cd 100644
+--- a/tests/rest/client/test_media.py
++++ b/tests/rest/client/test_media.py
+@@ -1006,7 +1006,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
+ data = base64.b64encode(SMALL_PNG)
+
+ end_content = (
+- b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
++ b'<html><head><img src="data:image/png;base64,%s" /></head></html>'
+ ) % (data,)
+
+ channel = self.make_request(
+diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
+index 53f1782d59..280486da08 100644
+--- a/tests/rest/client/utils.py
++++ b/tests/rest/client/utils.py
+@@ -716,9 +716,9 @@ class RestHelper:
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+- assert (
+- channel.code == expected_status
+- ), f"unexpected status in response: {channel.code}"
++ assert channel.code == expected_status, (
++ f"unexpected status in response: {channel.code}"
++ )
+ return channel.json_body
+
+ def auth_via_oidc(
+diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py
+index 103d7662d9..2a7bee19f9 100644
+--- a/tests/rest/media/test_url_preview.py
++++ b/tests/rest/media/test_url_preview.py
+@@ -878,7 +878,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
+ data = base64.b64encode(SMALL_PNG)
+
+ end_content = (
+- b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
++ b'<html><head><img src="data:image/png;base64,%s" /></head></html>'
+ ) % (data,)
+
+ channel = self.make_request(
+diff --git a/tests/server.py b/tests/server.py
+index 84ed9f68eb..f01708b77f 100644
+--- a/tests/server.py
++++ b/tests/server.py
+@@ -225,9 +225,9 @@ class FakeChannel:
+ new_headers.addRawHeader(k, v)
+ headers = new_headers
+
+- assert isinstance(
+- headers, Headers
+- ), f"headers are of the wrong type: {headers!r}"
++ assert isinstance(headers, Headers), (
++ f"headers are of the wrong type: {headers!r}"
++ )
+
+ self.result["headers"] = headers
+
+diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
+index 9420d03841..11313fc933 100644
+--- a/tests/storage/test_base.py
++++ b/tests/storage/test_base.py
+@@ -349,7 +349,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
+ )
+
+ self.mock_txn.execute.assert_called_once_with(
+- "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?",
++ "UPDATE tablename SET colC = ?, colD = ? WHERE colA = ? AND colB = ?",
+ [3, 4, 1, 2],
+ )
+
+diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
+index ba01b038ab..74edca7523 100644
+--- a/tests/storage/test_devices.py
++++ b/tests/storage/test_devices.py
+@@ -211,9 +211,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
+ even if that means leaving an earlier batch one EDU short of the limit.
+ """
+
+- assert self.hs.is_mine_id(
+- "@user_id:test"
+- ), "Test not valid: this MXID should be considered local"
++ assert self.hs.is_mine_id("@user_id:test"), (
++ "Test not valid: this MXID should be considered local"
++ )
+
+ self.get_success(
+ self.store.set_e2e_cross_signing_key(
+diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
+index 088f0d24f9..0500c68e9d 100644
+--- a/tests/storage/test_event_federation.py
++++ b/tests/storage/test_event_federation.py
+@@ -114,7 +114,7 @@ def get_all_topologically_sorted_orders(
+ # This is implemented by Kahn's algorithm, and forking execution each time
+ # we have a choice over which node to consider next.
+
+- degree_map = {node: 0 for node in nodes}
++ degree_map = dict.fromkeys(nodes, 0)
+ reverse_graph: Dict[T, Set[T]] = {}
+
+ for node, edges in graph.items():
+diff --git a/tests/test_state.py b/tests/test_state.py
+index dce56fe78a..adb72b0730 100644
+--- a/tests/test_state.py
++++ b/tests/test_state.py
+@@ -149,7 +149,7 @@ class _DummyStore:
+ async def get_partial_state_events(
+ self, event_ids: Collection[str]
+ ) -> Dict[str, bool]:
+- return {e: False for e in event_ids}
++ return dict.fromkeys(event_ids, False)
+
+ async def get_state_group_delta(
+ self, name: str
+diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py
+index dd40c338d6..d58222a9f6 100644
+--- a/tests/test_utils/logging_setup.py
++++ b/tests/test_utils/logging_setup.py
+@@ -48,7 +48,7 @@ def setup_logging() -> None:
+
+ # We exclude `%(asctime)s` from this format because the Twisted logger adds its own
+ # timestamp
+- log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s"
++ log_format = "%(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"
+
+ handler = ToTwistedHandler()
+ formatter = logging.Formatter(log_format)
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0068-1.130.0.patch b/packages/overlays/matrix-synapse/patches/0068-1.130.0.patch
new file mode 100644
index 0000000..4c282de
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0068-1.130.0.patch
@@ -0,0 +1,77 @@
+From a36f3a6d875ce92e3cf6f3659f99ad71f8a0c069 Mon Sep 17 00:00:00 2001
+From: Devon Hudson <devonhudson@librem.one>
+Date: Tue, 20 May 2025 08:35:23 -0600
+Subject: [PATCH 68/74] 1.130.0
+
+---
+ CHANGES.md | 10 ++++++++++
+ changelog.d/18439.bugfix | 1 -
+ changelog.d/18447.bugfix | 1 -
+ debian/changelog | 6 ++++++
+ pyproject.toml | 2 +-
+ 5 files changed, 17 insertions(+), 3 deletions(-)
+ delete mode 100644 changelog.d/18439.bugfix
+ delete mode 100644 changelog.d/18447.bugfix
+
+diff --git a/CHANGES.md b/CHANGES.md
+index a0a9d2f064..6837ad6bef 100644
+--- a/CHANGES.md
++++ b/CHANGES.md
+@@ -1,3 +1,13 @@
++# Synapse 1.130.0 (2025-05-20)
++
++### Bugfixes
++
++- Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439))
++- Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447))
++
++
++
++
+ # Synapse 1.130.0rc1 (2025-05-13)
+
+ ### Features
+diff --git a/changelog.d/18439.bugfix b/changelog.d/18439.bugfix
+deleted file mode 100644
+index 5ee9bda474..0000000000
+--- a/changelog.d/18439.bugfix
++++ /dev/null
+@@ -1 +0,0 @@
+-Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1.
+diff --git a/changelog.d/18447.bugfix b/changelog.d/18447.bugfix
+deleted file mode 100644
+index 578be1ffe9..0000000000
+--- a/changelog.d/18447.bugfix
++++ /dev/null
+@@ -1 +0,0 @@
+-Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6).
+diff --git a/debian/changelog b/debian/changelog
+index e3eb894851..56776a7d86 100644
+--- a/debian/changelog
++++ b/debian/changelog
+@@ -1,3 +1,9 @@
++matrix-synapse-py3 (1.130.0) stable; urgency=medium
++
++ * New Synapse release 1.130.0.
++
++ -- Synapse Packaging team <packages@matrix.org> Tue, 20 May 2025 08:34:13 -0600
++
+ matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.130.0rc1.
+diff --git a/pyproject.toml b/pyproject.toml
+index 5f80d28344..7bc9fd4130 100644
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
+
+ [tool.poetry]
+ name = "matrix-synapse"
+-version = "1.130.0rc1"
++version = "1.130.0"
+ description = "Homeserver for the Matrix decentralised comms protocol"
+ authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
+ license = "AGPL-3.0-or-later"
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch b/packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch
new file mode 100644
index 0000000..4619fab
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0069-Tweak-changelog.patch
@@ -0,0 +1,25 @@
+From f92c6455efbecaba1ddb1595e597aec0d7e4fb42 Mon Sep 17 00:00:00 2001
+From: Devon Hudson <devonhudson@librem.one>
+Date: Tue, 20 May 2025 08:46:37 -0600
+Subject: [PATCH 69/74] Tweak changelog
+
+---
+ CHANGES.md | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/CHANGES.md b/CHANGES.md
+index 6837ad6bef..d29027bbfb 100644
+--- a/CHANGES.md
++++ b/CHANGES.md
+@@ -2,7 +2,7 @@
+
+ ### Bugfixes
+
+-- Fix startup being blocked on creating a new index. Introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439))
++- Fix startup being blocked on creating a new index that was introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439))
+ - Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447))
+
+
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0070-Add-a-unit-test-for-the-phone-home-stats-18463.patch b/packages/overlays/matrix-synapse/patches/0070-Add-a-unit-test-for-the-phone-home-stats-18463.patch
new file mode 100644
index 0000000..613644c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0070-Add-a-unit-test-for-the-phone-home-stats-18463.patch
@@ -0,0 +1,384 @@
+From 4b1d9d5d0e3df7a3151c07f9d42b02dad13a27bf Mon Sep 17 00:00:00 2001
+From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
+Date: Tue, 20 May 2025 16:26:45 +0100
+Subject: [PATCH 70/74] Add a unit test for the phone home stats (#18463)
+
+---
+ changelog.d/18463.misc | 1 +
+ .../reporting_homeserver_usage_statistics.md | 6 +-
+ synapse/app/phone_stats_home.py | 33 ++-
+ tests/metrics/test_phone_home_stats.py | 263 ++++++++++++++++++
+ 4 files changed, 296 insertions(+), 7 deletions(-)
+ create mode 100644 changelog.d/18463.misc
+ create mode 100644 tests/metrics/test_phone_home_stats.py
+
+diff --git a/changelog.d/18463.misc b/changelog.d/18463.misc
+new file mode 100644
+index 0000000000..1264758d7c
+--- /dev/null
++++ b/changelog.d/18463.misc
+@@ -0,0 +1 @@
++Add unit tests for homeserver usage statistics.
+\ No newline at end of file
+diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
+index 4c0dbb5acd..a8a717e2a2 100644
+--- a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
++++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
+@@ -30,7 +30,7 @@ The following statistics are sent to the configured reporting endpoint:
+ | `python_version` | string | The Python version number in use (e.g "3.7.1"). Taken from `sys.version_info`. |
+ | `total_users` | int | The number of registered users on the homeserver. |
+ | `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. |
+-| `daily_user_type_native` | int | The number of native users created in the last 24 hours. |
++| `daily_user_type_native` | int | The number of native, non-guest users created in the last 24 hours. |
+ | `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. |
+ | `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. |
+ | `total_room_count` | int | The total number of rooms present on the homeserver. |
+@@ -50,8 +50,8 @@ The following statistics are sent to the configured reporting endpoint:
+ | `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. |
+ | `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. |
+ | `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. |
+-| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.0, and "3.38.5" for SQLite 3.38.5 installed on the system. |
+-| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
+| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.10, and "3.38.5" for SQLite 3.38.5 installed on the system. |
++| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
+
+
+ [^1]: Native matrix users and guests are always counted. If the
+diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
+index f602bbbeea..bb450a235c 100644
+--- a/synapse/app/phone_stats_home.py
++++ b/synapse/app/phone_stats_home.py
+@@ -34,6 +34,22 @@ if TYPE_CHECKING:
+
+ logger = logging.getLogger("synapse.app.homeserver")
+
++ONE_MINUTE_SECONDS = 60
++ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS
++
++MILLISECONDS_PER_SECOND = 1000
++
++INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
++"""
++We wait 5 minutes to send the first set of stats as the server can be quite busy the
++first few minutes
++"""
++
++PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
++"""
++Phone home stats are sent every 3 hours
++"""
++
+ # Contains the list of processes we will be monitoring
+ # currently either 0 or 1
+ _stats_process: List[Tuple[int, "resource.struct_rusage"]] = []
+@@ -185,12 +201,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
+ # If you increase the loop period, the accuracy of user_daily_visits
+ # table will decrease
+ clock.looping_call(
+- hs.get_datastores().main.generate_user_daily_visits, 5 * 60 * 1000
++ hs.get_datastores().main.generate_user_daily_visits,
++ 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND,
+ )
+
+ # monthly active user limiting functionality
+ clock.looping_call(
+- hs.get_datastores().main.reap_monthly_active_users, 1000 * 60 * 60
++ hs.get_datastores().main.reap_monthly_active_users,
++ ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND,
+ )
+ hs.get_datastores().main.reap_monthly_active_users()
+
+@@ -221,7 +239,12 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
+
+ if hs.config.metrics.report_stats:
+ logger.info("Scheduling stats reporting for 3 hour intervals")
+- clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats)
++ clock.looping_call(
++ phone_stats_home,
++ PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND,
++ hs,
++ stats,
++ )
+
+ # We need to defer this init for the cases that we daemonize
+ # otherwise the process ID we get is that of the non-daemon process
+@@ -229,4 +252,6 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
+
+ # We wait 5 minutes to send the first set of stats as the server can
+ # be quite busy the first few minutes
+- clock.call_later(5 * 60, phone_stats_home, hs, stats)
++ clock.call_later(
++ INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, phone_stats_home, hs, stats
++ )
+diff --git a/tests/metrics/test_phone_home_stats.py b/tests/metrics/test_phone_home_stats.py
+new file mode 100644
+index 0000000000..5339d649df
+--- /dev/null
++++ b/tests/metrics/test_phone_home_stats.py
+@@ -0,0 +1,263 @@
++#
++# This file is licensed under the Affero General Public License (AGPL) version 3.
++#
++# Copyright (C) 2025 New Vector, Ltd
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU Affero General Public License as
++# published by the Free Software Foundation, either version 3 of the
++# License, or (at your option) any later version.
++#
++# See the GNU Affero General Public License for more details:
++# <https://www.gnu.org/licenses/agpl-3.0.html>.
++
++import logging
++from unittest.mock import AsyncMock
++
++from twisted.test.proto_helpers import MemoryReactor
++
++from synapse.app.phone_stats_home import (
++ PHONE_HOME_INTERVAL_SECONDS,
++ start_phone_stats_home,
++)
++from synapse.rest import admin, login, register, room
++from synapse.server import HomeServer
++from synapse.types import JsonDict
++from synapse.util import Clock
++
++from tests import unittest
++from tests.server import ThreadedMemoryReactorClock
++
++TEST_REPORT_STATS_ENDPOINT = "https://fake.endpoint/stats"
++TEST_SERVER_CONTEXT = "test-server-context"
++
++
++class PhoneHomeStatsTestCase(unittest.HomeserverTestCase):
++ servlets = [
++ admin.register_servlets_for_client_rest_resource,
++ room.register_servlets,
++ register.register_servlets,
++ login.register_servlets,
++ ]
++
++ def make_homeserver(
++ self, reactor: ThreadedMemoryReactorClock, clock: Clock
++ ) -> HomeServer:
++ # Configure the homeserver to enable stats reporting.
++ config = self.default_config()
++ config["report_stats"] = True
++ config["report_stats_endpoint"] = TEST_REPORT_STATS_ENDPOINT
++
++ # Configure the server context so we can check it ends up being reported
++ config["server_context"] = TEST_SERVER_CONTEXT
++
++ # Allow guests to be registered
++ config["allow_guest_access"] = True
++
++ hs = self.setup_test_homeserver(config=config)
++
++ # Replace the proxied http client with a mock, so we can inspect outbound requests to
++ # the configured stats endpoint.
++ self.put_json_mock = AsyncMock(return_value={})
++ hs.get_proxied_http_client().put_json = self.put_json_mock # type: ignore[method-assign]
++ return hs
++
++ def prepare(
++ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
++ ) -> None:
++ self.store = homeserver.get_datastores().main
++
++ # Wait for the background updates to add the database triggers that keep the
++ # `event_stats` table up-to-date.
++ self.wait_for_background_updates()
++
++ # Force stats reporting to occur
++ start_phone_stats_home(hs=homeserver)
++
++ super().prepare(reactor, clock, homeserver)
++
++ def _get_latest_phone_home_stats(self) -> JsonDict:
++ # Wait for `phone_stats_home` to be called again + a healthy margin (50s).
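++ # (Advancing two full intervals plus margin guarantees the looping call has
++ # fired at least once, regardless of when the initial delayed report ran.)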
++ self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50)
++
++ # Extract the reported stats from our http client mock
++ mock_calls = self.put_json_mock.call_args_list
++ report_stats_calls = []
++ for call in mock_calls:
++ if call.args[0] == TEST_REPORT_STATS_ENDPOINT:
++ report_stats_calls.append(call)
++
++ self.assertGreaterEqual(
++ (len(report_stats_calls)),
++ 1,
++ "Expected at-least one call to the report_stats endpoint",
++ )
++
++ # Extract the phone home stats from the call
++ phone_home_stats = report_stats_calls[0].args[1]
++
++ return phone_home_stats
++
++ def _perform_user_actions(self) -> None:
++ """
++ Perform some actions on the homeserver that would bump the phone home
++ stats.
++
++ This creates a few users (including a guest), a room, and sends some messages.
++ Expected number of events:
++ - 10 unencrypted messages
++ - 5 encrypted messages
++ - 24 total events (including room state, etc)
++ """
++
++ # Create some users
++ user_1_mxid = self.register_user(
++ username="test_user_1",
++ password="test",
++ )
++ user_2_mxid = self.register_user(
++ username="test_user_2",
++ password="test",
++ )
++ # Note: `self.register_user` does not support guest registration, and updating the
++ # Admin API it calls to add a new parameter would cause the `mac` parameter to fail
++ # in a backwards-incompatible manner. Hence, we make a manual request here.
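++ # The guest registered here is what the `daily_user_type_guest` stat below counts.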
++ _guest_user_mxid = self.make_request(
++ method="POST",
++ path="/_matrix/client/v3/register?kind=guest",
++ content={
++ "username": "guest_user",
++ "password": "test",
++ },
++ shorthand=False,
++ )
++
++ # Log in to each user
++ user_1_token = self.login(username=user_1_mxid, password="test")
++ user_2_token = self.login(username=user_2_mxid, password="test")
++
++ # Create a room between the two users
++ room_1_id = self.helper.create_room_as(
++ is_public=False,
++ tok=user_1_token,
++ )
++
++ # Mark this room as end-to-end encrypted
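++ # (this `m.room.encryption` state is what makes the room count toward `daily_active_e2ee_rooms`)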
++ self.helper.send_state(
++ room_id=room_1_id,
++ event_type="m.room.encryption",
++ body={
++ "algorithm": "m.megolm.v1.aes-sha2",
++ "rotation_period_ms": 604800000,
++ "rotation_period_msgs": 100,
++ },
++ state_key="",
++ tok=user_1_token,
++ )
++
++ # User 1 invites user 2
++ self.helper.invite(
++ room=room_1_id,
++ src=user_1_mxid,
++ targ=user_2_mxid,
++ tok=user_1_token,
++ )
++
++ # User 2 joins
++ self.helper.join(
++ room=room_1_id,
++ user=user_2_mxid,
++ tok=user_2_token,
++ )
++
++ # User 1 sends 10 unencrypted messages
++ for _ in range(10):
++ self.helper.send(
++ room_id=room_1_id,
++ body="Zoinks Scoob! A message!",
++ tok=user_1_token,
++ )
++
++ # User 2 sends 5 encrypted "messages"
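++ # (raw `m.room.encrypted` events; these are what `daily_e2ee_messages` counts below)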
++ for _ in range(5):
++ self.helper.send_event(
++ room_id=room_1_id,
++ type="m.room.encrypted",
++ content={
++ "algorithm": "m.olm.v1.curve25519-aes-sha2",
++ "sender_key": "some_key",
++ "ciphertext": {
++ "some_key": {
++ "type": 0,
++ "body": "encrypted_payload",
++ },
++ },
++ },
++ tok=user_2_token,
++ )
++
++ def test_phone_home_stats(self) -> None:
++ """
++ Test that the phone home stats contain the stats we expect based on
++ the scenario carried out in `prepare`
++ """
++ # Do things to bump the stats
++ self._perform_user_actions()
++
++ # Wait for the stats to be reported
++ phone_home_stats = self._get_latest_phone_home_stats()
++
++ self.assertEqual(
++ phone_home_stats["homeserver"], self.hs.config.server.server_name
++ )
++
++ self.assertTrue(isinstance(phone_home_stats["memory_rss"], int))
++ self.assertTrue(isinstance(phone_home_stats["cpu_average"], int))
++
++ self.assertEqual(phone_home_stats["server_context"], TEST_SERVER_CONTEXT)
++
++ self.assertTrue(isinstance(phone_home_stats["timestamp"], int))
++ self.assertTrue(isinstance(phone_home_stats["uptime_seconds"], int))
++ self.assertTrue(isinstance(phone_home_stats["python_version"], str))
++
++ # We expect only our test users to exist on the homeserver
++ self.assertEqual(phone_home_stats["total_users"], 3)
++ self.assertEqual(phone_home_stats["total_nonbridged_users"], 3)
++ self.assertEqual(phone_home_stats["daily_user_type_native"], 2)
++ self.assertEqual(phone_home_stats["daily_user_type_guest"], 1)
++ self.assertEqual(phone_home_stats["daily_user_type_bridged"], 0)
++ self.assertEqual(phone_home_stats["total_room_count"], 1)
++ self.assertEqual(phone_home_stats["daily_active_users"], 2)
++ self.assertEqual(phone_home_stats["monthly_active_users"], 2)
++ self.assertEqual(phone_home_stats["daily_active_rooms"], 1)
++ self.assertEqual(phone_home_stats["daily_active_e2ee_rooms"], 1)
++ self.assertEqual(phone_home_stats["daily_messages"], 10)
++ self.assertEqual(phone_home_stats["daily_e2ee_messages"], 5)
++ self.assertEqual(phone_home_stats["daily_sent_messages"], 10)
++ self.assertEqual(phone_home_stats["daily_sent_e2ee_messages"], 5)
++
++ # Our users have not been around for >30 days, hence these are all 0.
++ self.assertEqual(phone_home_stats["r30v2_users_all"], 0)
++ self.assertEqual(phone_home_stats["r30v2_users_android"], 0)
++ self.assertEqual(phone_home_stats["r30v2_users_ios"], 0)
++ self.assertEqual(phone_home_stats["r30v2_users_electron"], 0)
++ self.assertEqual(phone_home_stats["r30v2_users_web"], 0)
++ self.assertEqual(
++ phone_home_stats["cache_factor"], self.hs.config.caches.global_factor
++ )
++ self.assertEqual(
++ phone_home_stats["event_cache_size"],
++ self.hs.config.caches.event_cache_size,
++ )
++ self.assertEqual(
++ phone_home_stats["database_engine"],
++ self.hs.config.database.databases[0].config["name"],
++ )
++ self.assertEqual(
++ phone_home_stats["database_server_version"],
++ self.hs.get_datastores().main.database_engine.server_version,
++ )
++
++ synapse_logger = logging.getLogger("synapse")
++ log_level = synapse_logger.getEffectiveLevel()
++ self.assertEqual(phone_home_stats["log_level"], logging.getLevelName(log_level))
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0071-Include-room-ID-in-room-deletion-status-response-183.patch b/packages/overlays/matrix-synapse/patches/0071-Include-room-ID-in-room-deletion-status-response-183.patch
new file mode 100644
index 0000000..4ce28c4
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0071-Include-room-ID-in-room-deletion-status-response-183.patch
@@ -0,0 +1,116 @@
+From 553e124f766584456fbdb6d1aa37fdd12ad54dad Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= <ilmari@ilmari.org>
+Date: Tue, 20 May 2025 17:53:30 +0100
+Subject: [PATCH 71/74] Include room ID in room deletion status response
+ (#18318)
+
+When querying by `delete_id` it's handy to see which room the delete
+pertains to.
+---
+ changelog.d/18318.feature | 1 +
+ docs/admin_api/rooms.md | 7 ++++++-
+ synapse/rest/admin/rooms.py | 1 +
+ tests/rest/admin/test_room.py | 7 +++++++
+ 4 files changed, 15 insertions(+), 1 deletion(-)
+ create mode 100644 changelog.d/18318.feature
+
+diff --git a/changelog.d/18318.feature b/changelog.d/18318.feature
+new file mode 100644
+index 0000000000..fba0e83577
+--- /dev/null
++++ b/changelog.d/18318.feature
+@@ -0,0 +1 @@
++Include room ID in room deletion status response.
+diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
+index bfc2cd4376..bdda9b47ad 100644
+--- a/docs/admin_api/rooms.md
++++ b/docs/admin_api/rooms.md
+@@ -794,6 +794,7 @@ A response body like the following is returned:
+ "results": [
+ {
+ "delete_id": "delete_id1",
++ "room_id": "!roomid:example.com",
+ "status": "failed",
+ "error": "error message",
+ "shutdown_room": {
+@@ -804,6 +805,7 @@ A response body like the following is returned:
+ }
+ }, {
+ "delete_id": "delete_id2",
++ "room_id": "!roomid:example.com",
+ "status": "purging",
+ "shutdown_room": {
+ "kicked_users": [
+@@ -842,6 +844,8 @@ A response body like the following is returned:
+ ```json
+ {
+ "status": "purging",
++ "delete_id": "bHkCNQpHqOaFhPtK",
++ "room_id": "!roomid:example.com",
+ "shutdown_room": {
+ "kicked_users": [
+ "@foobar:example.com"
+@@ -869,7 +873,8 @@ The following fields are returned in the JSON response body:
+ - `results` - An array of objects, each containing information about one task.
+ This field is omitted from the result when you query by `delete_id`.
+ Task objects contain the following fields:
+- - `delete_id` - The ID for this purge if you query by `room_id`.
++ - `delete_id` - The ID for this purge
++ - `room_id` - The ID of the room being deleted
+ - `status` - The status will be one of:
+ - `shutting_down` - The process is removing users from the room.
+ - `purging` - The process is purging the room and event data from database.
+diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
+index 3097cb1a9d..f8c5bf18d4 100644
+--- a/synapse/rest/admin/rooms.py
++++ b/synapse/rest/admin/rooms.py
+@@ -150,6 +150,7 @@ class RoomRestV2Servlet(RestServlet):
+ def _convert_delete_task_to_response(task: ScheduledTask) -> JsonDict:
+ return {
+ "delete_id": task.id,
++ "room_id": task.resource_id,
+ "status": task.status,
+ "shutdown_room": task.result,
+ }
+diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
+index 165d175ab2..8d806082aa 100644
+--- a/tests/rest/admin/test_room.py
++++ b/tests/rest/admin/test_room.py
+@@ -758,6 +758,8 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
+ self.assertEqual(2, len(channel.json_body["results"]))
+ self.assertEqual("complete", channel.json_body["results"][0]["status"])
+ self.assertEqual("complete", channel.json_body["results"][1]["status"])
++ self.assertEqual(self.room_id, channel.json_body["results"][0]["room_id"])
++ self.assertEqual(self.room_id, channel.json_body["results"][1]["room_id"])
+ delete_ids = {delete_id1, delete_id2}
+ self.assertTrue(channel.json_body["results"][0]["delete_id"] in delete_ids)
+ delete_ids.remove(channel.json_body["results"][0]["delete_id"])
+@@ -777,6 +779,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
+ self.assertEqual(1, len(channel.json_body["results"]))
+ self.assertEqual("complete", channel.json_body["results"][0]["status"])
+ self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"])
++ self.assertEqual(self.room_id, channel.json_body["results"][0]["room_id"])
+
+ # get status after more than clearing time for all tasks
+ self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)
+@@ -1237,6 +1240,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
+ self.assertEqual(
+ delete_id, channel_room_id.json_body["results"][0]["delete_id"]
+ )
++ self.assertEqual(
++ self.room_id, channel_room_id.json_body["results"][0]["room_id"]
++ )
+
+ # get information by delete_id
+ channel_delete_id = self.make_request(
+@@ -1249,6 +1255,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
+ channel_delete_id.code,
+ msg=channel_delete_id.json_body,
+ )
++ self.assertEqual(self.room_id, channel_delete_id.json_body["room_id"])
+
+ # test values that are the same in both responses
+ for content in [
+--
+2.49.0
+
diff --git a/private/0001-Hotfix-ignore-rejected-events-in-delayed_events.patch b/packages/overlays/matrix-synapse/patches/0072-Hotfix-ignore-rejected-events-in-delayed_events.patch
index 4347b0b..e326cd3 100644
--- a/private/0001-Hotfix-ignore-rejected-events-in-delayed_events.patch
+++ b/packages/overlays/matrix-synapse/patches/0072-Hotfix-ignore-rejected-events-in-delayed_events.patch
@@ -1,7 +1,7 @@
-From 8bd4f70bf79f0353318af10509997df6558f93e5 Mon Sep 17 00:00:00 2001
+From bd2439ec4662f8ad9333797c02f4df764047ace5 Mon Sep 17 00:00:00 2001
From: Rory& <root@rory.gay>
Date: Sun, 20 Apr 2025 00:30:29 +0200
-Subject: [PATCH] Hotfix: ignore rejected events in delayed_events
+Subject: [PATCH 72/74] Hotfix: ignore rejected events in delayed_events
---
synapse/handlers/delayed_events.py | 7 ++++++-
@@ -27,5 +27,5 @@ index 80cb1cec9b..cb2a34ff73 100644
next_send_ts = await self._store.cancel_delayed_state_events(
--
-2.48.1
+2.49.0
diff --git a/packages/overlays/matrix-synapse/patches/0073-Add-too-much-logging-to-room-summary-over-federation.patch b/packages/overlays/matrix-synapse/patches/0073-Add-too-much-logging-to-room-summary-over-federation.patch
new file mode 100644
index 0000000..31caf8a
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0073-Add-too-much-logging-to-room-summary-over-federation.patch
@@ -0,0 +1,78 @@
+From 5f6b610df67bdb57e4de09168923782b934c34fe Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Wed, 23 Apr 2025 17:53:52 +0200
+Subject: [PATCH 73/74] Add too much logging to room summary over federation
+
+Signed-off-by: Rory& <root@rory.gay>
+---
+ synapse/handlers/room_summary.py | 41 +++++++++++++++++++++++++++++++++++----
+ 1 file changed, 37 insertions(+), 4 deletions(-)
+
+diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
+index 64f5bea014..6e64930682 100644
+--- a/synapse/handlers/room_summary.py
++++ b/synapse/handlers/room_summary.py
+@@ -700,23 +700,56 @@
+ """
+ # The API doesn't return the room version so assume that a
+ # join rule of knock is valid.
++ join_rule = room.get("join_rule")
++ world_readable = room.get("world_readable")
++
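++ # [EMMA] Deliberately noisy diagnostics: log every input to this remote-room accessibility check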
++ logger.warning(
++ "[EMMA] Checking if room %s is accessible to %s: join_rule=%s, world_readable=%s",
++ room_id, requester, join_rule, world_readable
++ )
++
+ if (
+- room.get("join_rule")
+- in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
+- or room.get("world_readable") is True
++ join_rule in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
++ or world_readable is True
+ ):
+ return True
+- elif not requester:
++ else:
++ logger.warning(
++ "[EMMA] Room %s is not accessible to %s: join_rule=%s, world_readable=%s, join_rule result=%s, world_readable result=%s",
++ room_id, requester, join_rule, world_readable,
++ join_rule in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED),
++ world_readable is True
++ )
++
++ if not requester:
++ logger.warning(
++ "[EMMA] No requester, so room %s is not accessible",
++ room_id
++ )
+ return False
++
+
+ # Check if the user is a member of any of the allowed rooms from the response.
+ allowed_rooms = room.get("allowed_room_ids")
++ logger.warning(
++ "[EMMA] Checking if room %s is in allowed rooms for %s: join_rule=%s, allowed_rooms=%s",
++ requester,
++ room_id,
++ join_rule,
++ allowed_rooms
++ )
+ if allowed_rooms and isinstance(allowed_rooms, list):
+ if await self._event_auth_handler.is_user_in_rooms(
+ allowed_rooms, requester
+ ):
+ return True
+
++ logger.warning(
++ "[EMMA] Checking if room %s is accessble to %s via local state",
++ room_id,
++ requester
++ )
+ # Finally, check locally if we can access the room. The user might
+ # already be in the room (if it was a child room), or there might be a
+ # pending invite, etc.
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0074-Log-entire-room-if-accessibility-check-fails.patch b/packages/overlays/matrix-synapse/patches/0074-Log-entire-room-if-accessibility-check-fails.patch
new file mode 100644
index 0000000..12a33e6
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0074-Log-entire-room-if-accessibility-check-fails.patch
@@ -0,0 +1,29 @@
+From 923a3c7204aea235744d3081a1d3cc99b757f801 Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Wed, 23 Apr 2025 18:24:57 +0200
+Subject: [PATCH 74/74] Log entire room if accessibility check fails
+
+Signed-off-by: Rory& <root@rory.gay>
+---
+ synapse/handlers/room_summary.py | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
+index 6e64930682..1c39cfed1b 100644
+--- a/synapse/handlers/room_summary.py
++++ b/synapse/handlers/room_summary.py
+@@ -916,6 +916,11 @@
+ if not room_entry or not await self._is_remote_room_accessible(
+ requester, room_entry.room_id, room_entry.room
+ ):
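++ # [EMMA] Before rejecting, dump the entire room entry so the offending field shows up in the logs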
++ logger.warning(
++ "[Emma] Room entry contents: %s",
++ room_entry.room if room_entry else None
++ )
+ raise NotFoundError("Room not found or is not accessible")
+
+ room = dict(room_entry.room)
+--
+2.49.0
+
diff --git a/private/synapse-fast-links.patch b/packages/overlays/matrix-synapse/patches/synapse-fast-links.patch
index c35ba87..c35ba87 100644
--- a/private/synapse-fast-links.patch
+++ b/packages/overlays/matrix-synapse/patches/synapse-fast-links.patch