diff --git a/packages/overlays/matrix-synapse/patches/0001-Hotfix-ignore-rejected-events-in-delayed_events.patch b/packages/overlays/matrix-synapse/patches/0001-Hotfix-ignore-rejected-events-in-delayed_events.patch
new file mode 100644
index 0000000..10bda30
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0001-Hotfix-ignore-rejected-events-in-delayed_events.patch
@@ -0,0 +1,31 @@
+From c9685b56adfec0f8917e2a04b792519c57f0baa4 Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Sun, 20 Apr 2025 00:30:29 +0200
+Subject: [PATCH 01/10] Hotfix: ignore rejected events in delayed_events
+
+---
+ synapse/handlers/delayed_events.py | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py
+index 80cb1cec9b..cb2a34ff73 100644
+--- a/synapse/handlers/delayed_events.py
++++ b/synapse/handlers/delayed_events.py
+@@ -208,8 +208,13 @@ class DelayedEventsHandler:
+ )
+
+ event = await self._store.get_event(
+- delta.event_id, check_room_id=delta.room_id
++ delta.event_id, check_room_id=delta.room_id, allow_rejected=True, allow_none=True
+ )
++
++ if event is None or event.rejected_reason is not None:
++ # The event is missing or was rejected, so don't cancel any delayed events for it.
++ continue
++
+ sender = UserID.from_string(event.sender)
+
+ next_send_ts = await self._store.cancel_delayed_state_events(
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0002-Add-too-much-logging-to-room-summary-over-federation.patch b/packages/overlays/matrix-synapse/patches/0002-Add-too-much-logging-to-room-summary-over-federation.patch
new file mode 100644
index 0000000..8441823
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0002-Add-too-much-logging-to-room-summary-over-federation.patch
@@ -0,0 +1,77 @@
+From 12f5c44e7d1cedc9f11402fc5c06ce54a8c24915 Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Wed, 23 Apr 2025 17:53:52 +0200
+Subject: [PATCH 02/10] Add too much logging to room summary over federation
+
+Signed-off-by: Rory& <root@rory.gay>
+---
+ synapse/handlers/room_summary.py | 40 ++++++++++++++++++++++++++++----
+ 1 file changed, 36 insertions(+), 4 deletions(-)
+
+diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
+index 91b131d09b..6e64930682 100644
+--- a/synapse/handlers/room_summary.py
++++ b/synapse/handlers/room_summary.py
+@@ -700,23 +700,55 @@ class RoomSummaryHandler:
+ """
+ # The API doesn't return the room version so assume that a
+ # join rule of knock is valid.
++ join_rule = room.get("join_rule")
++ world_readable = room.get("world_readable")
++
++ logger.warning(
++ "[EMMA] Checking if room %s is accessible to %s: join_rule=%s, world_readable=%s",
++ room_id, requester, join_rule, world_readable
++ )
++
+ if (
+- room.get("join_rule", JoinRules.PUBLIC)
+- in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
+- or room.get("world_readable") is True
++ join_rule in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
++ or world_readable is True
+ ):
+ return True
+- elif not requester:
++ else:
++ logger.warning(
++ "[EMMA] Room %s is not accessible to %s: join_rule=%s, world_readable=%s, join_rule result=%s, world_readable result=%s",
++ room_id, requester, join_rule, world_readable,
++ join_rule in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED),
++ world_readable is True
++ )
++
++ if not requester:
++ logger.warning(
++ "[EMMA] No requester, so room %s is not accessible",
++ room_id
++ )
+ return False
++
+
+ # Check if the user is a member of any of the allowed rooms from the response.
+ allowed_rooms = room.get("allowed_room_ids")
++ logger.warning(
++ "[EMMA] Checking if room %s is in allowed rooms for %s: join_rule=%s, allowed_rooms=%s",
++ room_id,
++ requester,
++ join_rule,
++ allowed_rooms
++ )
+ if allowed_rooms and isinstance(allowed_rooms, list):
+ if await self._event_auth_handler.is_user_in_rooms(
+ allowed_rooms, requester
+ ):
+ return True
+
++ logger.warning(
++ "[EMMA] Checking if room %s is accessible to %s via local state",
++ room_id,
++ requester
++ )
+ # Finally, check locally if we can access the room. The user might
+ # already be in the room (if it was a child room), or there might be a
+ # pending invite, etc.
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0003-Log-entire-room-if-accessibility-check-fails.patch b/packages/overlays/matrix-synapse/patches/0003-Log-entire-room-if-accessibility-check-fails.patch
new file mode 100644
index 0000000..e1676c6
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0003-Log-entire-room-if-accessibility-check-fails.patch
@@ -0,0 +1,28 @@
+From feb88e251b0a7402095643444710f160b9e73daa Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Wed, 23 Apr 2025 18:24:57 +0200
+Subject: [PATCH 03/10] Log entire room if accessibility check fails
+
+Signed-off-by: Rory& <root@rory.gay>
+---
+ synapse/handlers/room_summary.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
+index 6e64930682..1c39cfed1b 100644
+--- a/synapse/handlers/room_summary.py
++++ b/synapse/handlers/room_summary.py
+@@ -916,6 +916,10 @@ class RoomSummaryHandler:
+ if not room_entry or not await self._is_remote_room_accessible(
+ requester, room_entry.room_id, room_entry.room
+ ):
++ logger.warning(
++ "[Emma] Room entry contents: %s",
++ room_entry.room if room_entry else None
++ )
+ raise NotFoundError("Room not found or is not accessible")
+
+ room = dict(room_entry.room)
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0004-Log-policy-server-rejected-events.patch b/packages/overlays/matrix-synapse/patches/0004-Log-policy-server-rejected-events.patch
new file mode 100644
index 0000000..63903f1
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0004-Log-policy-server-rejected-events.patch
@@ -0,0 +1,30 @@
+From d06fbc3b1b6158a1e3805d3dd282427268dea01a Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Tue, 27 May 2025 05:21:46 +0200
+Subject: [PATCH 04/10] Log policy server rejected events
+
+---
+ synapse/handlers/room_policy.py | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/synapse/handlers/room_policy.py b/synapse/handlers/room_policy.py
+index dcfebb128c..3a83c4d6ec 100644
+--- a/synapse/handlers/room_policy.py
++++ b/synapse/handlers/room_policy.py
+@@ -84,6 +84,13 @@ class RoomPolicyHandler:
+ policy_server, event
+ )
+ if recommendation != RECOMMENDATION_OK:
++ logger.info(
++ "[POLICY] Policy server %s recommended not to allow event %s in room %s: %s",
++ policy_server,
++ event.event_id,
++ event.room_id,
++ recommendation,
++ )
+ return False
+
+ return True # default allow
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0005-Use-parse_boolean-for-unredacted-content.patch b/packages/overlays/matrix-synapse/patches/0005-Use-parse_boolean-for-unredacted-content.patch
new file mode 100644
index 0000000..bfb3e75
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0005-Use-parse_boolean-for-unredacted-content.patch
@@ -0,0 +1,29 @@
+From 9c3f28c68cb89e81a98561e0898b00c43a280a65 Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Tue, 27 May 2025 06:14:26 +0200
+Subject: [PATCH 05/10] Use parse_boolean for unredacted content
+
+---
+ synapse/rest/client/room.py | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
+index bb41575d46..63ea52baf8 100644
+--- a/synapse/rest/client/room.py
++++ b/synapse/rest/client/room.py
+@@ -914,10 +914,9 @@ class RoomEventServlet(RestServlet):
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+
+ include_unredacted_content = self.msc2815_enabled and (
+- parse_string(
++ parse_boolean(
+ request,
+- "fi.mau.msc2815.include_unredacted_content",
+- allowed_values=("true", "false"),
++ "fi.mau.msc2815.include_unredacted_content"
+ )
+ == "true"
+ )
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0006-Expose-tombstone-in-room-admin-api.patch b/packages/overlays/matrix-synapse/patches/0006-Expose-tombstone-in-room-admin-api.patch
new file mode 100644
index 0000000..c12912e
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0006-Expose-tombstone-in-room-admin-api.patch
@@ -0,0 +1,114 @@
+From 23c042aead65385a500be6e671ddd0e942a7e864 Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Tue, 27 May 2025 06:37:52 +0200
+Subject: [PATCH 06/10] Expose tombstone in room admin api
+
+---
+ synapse/rest/admin/rooms.py | 5 ++++
+ synapse/rest/client/room.py | 1 -
+ synapse/storage/databases/main/room.py | 36 +++++++++++++++++++++++++-
+ 3 files changed, 40 insertions(+), 2 deletions(-)
+
+diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
+index f8c5bf18d4..adac1f0362 100644
+--- a/synapse/rest/admin/rooms.py
++++ b/synapse/rest/admin/rooms.py
+@@ -251,6 +251,10 @@ class ListRoomRestServlet(RestServlet):
+ direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
+ reverse_order = True if direction == Direction.BACKWARDS else False
+
++ emma_include_tombstone = parse_boolean(
++ request, "emma_include_tombstone", default=False
++ )
++
+ # Return list of rooms according to parameters
+ rooms, total_rooms = await self.store.get_rooms_paginate(
+ start,
+@@ -260,6 +264,7 @@ class ListRoomRestServlet(RestServlet):
+ search_term,
+ public_rooms,
+ empty_rooms,
++ emma_include_tombstone = emma_include_tombstone
+ )
+
+ response = {
+diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
+index 63ea52baf8..38230de0de 100644
+--- a/synapse/rest/client/room.py
++++ b/synapse/rest/client/room.py
+@@ -918,7 +918,6 @@ class RoomEventServlet(RestServlet):
+ request,
+ "fi.mau.msc2815.include_unredacted_content"
+ )
+- == "true"
+ )
+ if include_unredacted_content and not await self.auth.is_server_admin(
+ requester
+diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
+index 1df06a5171..347dbbba6b 100644
+--- a/synapse/storage/databases/main/room.py
++++ b/synapse/storage/databases/main/room.py
+@@ -610,6 +610,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
+ search_term: Optional[str],
+ public_rooms: Optional[bool],
+ empty_rooms: Optional[bool],
++ emma_include_tombstone: bool = False,
+ ) -> Tuple[List[Dict[str, Any]], int]:
+ """Function to retrieve a paginated list of rooms as json.
+
+@@ -629,6 +630,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
+ If true, empty rooms are queried.
+ if false, empty rooms are excluded from the query. When it is
+ none (the default), both empty rooms and none-empty rooms are queried.
++ emma_include_tombstone: If true, include tombstone events in the results.
+ Returns:
+ A list of room dicts and an integer representing the total number of
+ rooms that exist given this query
+@@ -797,11 +799,43 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
+ room_count = cast(Tuple[int], txn.fetchone())
+ return rooms, room_count[0]
+
+- return await self.db_pool.runInteraction(
++ result = await self.db_pool.runInteraction(
+ "get_rooms_paginate",
+ _get_rooms_paginate_txn,
+ )
+
++ if emma_include_tombstone:
++ room_id_sql, room_id_args = make_in_list_sql_clause(
++ self.database_engine, "cse.room_id", [r["room_id"] for r in result[0]]
++ )
++
++ tombstone_sql = """
++ SELECT cse.room_id, cse.event_id, ej.json
++ FROM current_state_events cse
++ JOIN event_json ej USING (event_id)
++ WHERE cse.type = 'm.room.tombstone'
++ AND {room_id_sql}
++ """.format(
++ room_id_sql=room_id_sql
++ )
++
++ def _get_tombstones_txn(
++ txn: LoggingTransaction,
++ ) -> Tuple[List[Dict[str, Any]], int]:
++ txn.execute(tombstone_sql, room_id_args)
++ for room_id, event_id, json in txn:
++ for result_room in result[0]:
++ if result_room["room_id"] == room_id:
++ result_room["gay.rory.synapse_admin_extensions.tombstone"] = db_to_json(json)
++ break
++ return result[0], result[1]
++
++ result = await self.db_pool.runInteraction(
++ "get_rooms_tombstones", _get_tombstones_txn,
++ )
++
++ return result
++
+ @cached(max_entries=10000)
+ async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverride]:
+ """Check if there are any overrides for ratelimiting for the given user
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0007-fix-Always-recheck-messages-pagination-data-if-a-bac.patch b/packages/overlays/matrix-synapse/patches/0007-fix-Always-recheck-messages-pagination-data-if-a-bac.patch
new file mode 100644
index 0000000..4ebc20c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0007-fix-Always-recheck-messages-pagination-data-if-a-bac.patch
@@ -0,0 +1,204 @@
+From 99b146825a1a8257d05440ae3e331c68b8e1575a Mon Sep 17 00:00:00 2001
+From: Jason Little <j.little@famedly.com>
+Date: Wed, 30 Apr 2025 09:29:42 -0500
+Subject: [PATCH 07/10] fix: Always recheck `/messages` pagination data if a
+ backfill might have been needed (#28)
+
+---
+ synapse/handlers/federation.py | 35 +++++++++++++--------------------
+ synapse/handlers/pagination.py | 36 +++++++++++++++++++---------------
+ 2 files changed, 34 insertions(+), 37 deletions(-)
+
+diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
+index a6de3e824d..ff751d25f6 100644
+--- a/synapse/handlers/federation.py
++++ b/synapse/handlers/federation.py
+@@ -211,7 +211,7 @@ class FederationHandler:
+ @tag_args
+ async def maybe_backfill(
+ self, room_id: str, current_depth: int, limit: int, record_time: bool = True
+- ) -> bool:
++ ) -> None:
+ """Checks the database to see if we should backfill before paginating,
+ and if so do.
+
+@@ -225,8 +225,6 @@ class FederationHandler:
+ should back paginate.
+ record_time: Whether to record the time it takes to backfill.
+
+- Returns:
+- True if we actually tried to backfill something, otherwise False.
+ """
+ # Starting the processing time here so we can include the room backfill
+ # linearizer lock queue in the timing
+@@ -252,7 +250,7 @@ class FederationHandler:
+ limit: int,
+ *,
+ processing_start_time: Optional[int],
+- ) -> bool:
++ ) -> None:
+ """
+ Checks whether the `current_depth` is at or approaching any backfill
+ points in the room and if so, will backfill. We only care about
+@@ -326,7 +324,7 @@ class FederationHandler:
+ limit=1,
+ )
+ if not have_later_backfill_points:
+- return False
++ return None
+
+ logger.debug(
+ "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
+@@ -346,15 +344,15 @@ class FederationHandler:
+ )
+ # We return `False` because we're backfilling in the background and there is
+ # no new events immediately for the caller to know about yet.
+- return False
++ return None
+
+ # Even after recursing with `MAX_DEPTH`, we didn't find any
+ # backward extremities to backfill from.
+ if not sorted_backfill_points:
+ logger.debug(
+- "_maybe_backfill_inner: Not backfilling as no backward extremeties found."
++ "_maybe_backfill_inner: Not backfilling as no backward extremities found."
+ )
+- return False
++ return None
+
+ # If we're approaching an extremity we trigger a backfill, otherwise we
+ # no-op.
+@@ -373,7 +371,7 @@ class FederationHandler:
+ current_depth,
+ limit,
+ )
+- return False
++ return None
+
+ # For performance's sake, we only want to paginate from a particular extremity
+ # if we can actually see the events we'll get. Otherwise, we'd just spend a lot
+@@ -441,7 +439,7 @@ class FederationHandler:
+ logger.debug(
+ "_maybe_backfill_inner: found no extremities which would be visible"
+ )
+- return False
++ return None
+
+ logger.debug(
+ "_maybe_backfill_inner: extremities_to_request %s", extremities_to_request
+@@ -464,7 +462,7 @@ class FederationHandler:
+ )
+ )
+
+- async def try_backfill(domains: StrCollection) -> bool:
++ async def try_backfill(domains: StrCollection) -> None:
+ # TODO: Should we try multiple of these at a time?
+
+ # Number of contacted remote homeservers that have denied our backfill
+@@ -487,7 +485,7 @@ class FederationHandler:
+ # If this succeeded then we probably already have the
+ # appropriate stuff.
+ # TODO: We can probably do something more intelligent here.
+- return True
++ return None
+ except NotRetryingDestination as e:
+ logger.info("_maybe_backfill_inner: %s", e)
+ continue
+@@ -511,7 +509,7 @@ class FederationHandler:
+ )
+ denied_count += 1
+ if denied_count >= max_denied_count:
+- return False
++ return None
+ continue
+
+ logger.info("Failed to backfill from %s because %s", dom, e)
+@@ -527,7 +525,7 @@ class FederationHandler:
+ )
+ denied_count += 1
+ if denied_count >= max_denied_count:
+- return False
++ return None
+ continue
+
+ logger.info("Failed to backfill from %s because %s", dom, e)
+@@ -539,7 +537,7 @@ class FederationHandler:
+ logger.exception("Failed to backfill from %s because %s", dom, e)
+ continue
+
+- return False
++ return None
+
+ # If we have the `processing_start_time`, then we can make an
+ # observation. We wouldn't have the `processing_start_time` in the case
+@@ -551,14 +549,9 @@ class FederationHandler:
+ (processing_end_time - processing_start_time) / 1000
+ )
+
+- success = await try_backfill(likely_domains)
+- if success:
+- return True
+-
+ # TODO: we could also try servers which were previously in the room, but
+ # are no longer.
+-
+- return False
++ return await try_backfill(likely_domains)
+
+ async def send_invite(self, target_host: str, event: EventBase) -> EventBase:
+ """Sends the invite to the remote server for signing.
+diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
+index 4070b74b7a..81cda38549 100644
+--- a/synapse/handlers/pagination.py
++++ b/synapse/handlers/pagination.py
+@@ -577,27 +577,31 @@ class PaginationHandler:
+ or missing_too_many_events
+ or not_enough_events_to_fill_response
+ ):
+- did_backfill = await self.hs.get_federation_handler().maybe_backfill(
++ # Historical Note: There used to be a check here for if backfill was
++ # successful or not
++ await self.hs.get_federation_handler().maybe_backfill(
+ room_id,
+ curr_topo,
+ limit=pagin_config.limit,
+ )
+
+- # If we did backfill something, refetch the events from the database to
+- # catch anything new that might have been added since we last fetched.
+- if did_backfill:
+- (
+- events,
+- next_key,
+- _,
+- ) = await self.store.paginate_room_events_by_topological_ordering(
+- room_id=room_id,
+- from_key=from_token.room_key,
+- to_key=to_room_key,
+- direction=pagin_config.direction,
+- limit=pagin_config.limit,
+- event_filter=event_filter,
+- )
++ # Regardless if we backfilled or not, another worker or even a
++ # simultaneous request may have backfilled for us while we were held
++ # behind the linearizer. This should not have too much additional
++ # database load as it will only be triggered if a backfill *might* have
++ # been needed
++ (
++ events,
++ next_key,
++ _,
++ ) = await self.store.paginate_room_events_by_topological_ordering(
++ room_id=room_id,
++ from_key=from_token.room_key,
++ to_key=to_room_key,
++ direction=pagin_config.direction,
++ limit=pagin_config.limit,
++ event_filter=event_filter,
++ )
+ else:
+ # Otherwise, we can backfill in the background for eventual
+ # consistency's sake but we don't need to block the client waiting
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0008-Fix-pagination-with-large-gaps-of-rejected-events.patch b/packages/overlays/matrix-synapse/patches/0008-Fix-pagination-with-large-gaps-of-rejected-events.patch
new file mode 100644
index 0000000..81a6d3f
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0008-Fix-pagination-with-large-gaps-of-rejected-events.patch
@@ -0,0 +1,50 @@
+From 6eb23d3018f68744ba363fb7a89a9a4982d67a19 Mon Sep 17 00:00:00 2001
+From: Nicolas Werner <nicolas.werner@hotmail.de>
+Date: Sun, 8 Jun 2025 23:14:31 +0200
+Subject: [PATCH 08/10] Fix pagination with large gaps of rejected events
+
+---
+ synapse/handlers/pagination.py | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
+index 81cda38549..365c9cabcb 100644
+--- a/synapse/handlers/pagination.py
++++ b/synapse/handlers/pagination.py
+@@ -510,7 +510,7 @@ class PaginationHandler:
+ (
+ events,
+ next_key,
+- _,
++ limited,
+ ) = await self.store.paginate_room_events_by_topological_ordering(
+ room_id=room_id,
+ from_key=from_token.room_key,
+@@ -593,7 +593,7 @@ class PaginationHandler:
+ (
+ events,
+ next_key,
+- _,
++ limited,
+ ) = await self.store.paginate_room_events_by_topological_ordering(
+ room_id=room_id,
+ from_key=from_token.room_key,
+@@ -616,6 +616,15 @@ class PaginationHandler:
+
+ next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)
+
++ # We might have hit some internal filtering first, for example rejected
++ # events. Ensure we return a pagination token then.
++ if not events and limited:
++ return {
++ "chunk": [],
++ "start": await from_token.to_string(self.store),
++ "end": await next_token.to_string(self.store),
++ }
++
+ # if no events are returned from pagination, that implies
+ # we have reached the end of the available events.
+ # In that case we do not return end, to tell the client
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0009-Fix-nix-flake.patch b/packages/overlays/matrix-synapse/patches/0009-Fix-nix-flake.patch
new file mode 100644
index 0000000..09a7f5c
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0009-Fix-nix-flake.patch
@@ -0,0 +1,189 @@
+From 69d854a8250493f9c1e468f24031378ff334cf7f Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Mon, 9 Jun 2025 17:38:34 +0200
+Subject: [PATCH 09/10] Fix nix flake
+
+---
+ flake.lock | 58 +++++++++++++++++++-----------------------------------
+ flake.nix | 10 +++++++++-
+ 2 files changed, 29 insertions(+), 39 deletions(-)
+
+diff --git a/flake.lock b/flake.lock
+index a6a2aea328..4e2f01153b 100644
+--- a/flake.lock
++++ b/flake.lock
+@@ -39,15 +39,12 @@
+ }
+ },
+ "flake-utils": {
+- "inputs": {
+- "systems": "systems"
+- },
+ "locked": {
+- "lastModified": 1685518550,
+- "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
++ "lastModified": 1667395993,
++ "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+- "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
++ "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
+ "type": "github"
+ },
+ "original": {
+@@ -152,27 +149,27 @@
+ },
+ "nixpkgs-stable": {
+ "locked": {
+- "lastModified": 1685801374,
+- "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
++ "lastModified": 1678872516,
++ "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+- "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
++ "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+- "ref": "nixos-23.05",
++ "ref": "nixos-22.11",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+- "lastModified": 1729265718,
+- "narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
++ "lastModified": 1748217807,
++ "narHash": "sha256-P3u2PXxMlo49PutQLnk2PhI/imC69hFl1yY4aT5Nax8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+- "rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
++ "rev": "3108eaa516ae22c2360928589731a4f1581526ef",
+ "type": "github"
+ },
+ "original": {
+@@ -184,11 +181,11 @@
+ },
+ "nixpkgs_3": {
+ "locked": {
+- "lastModified": 1728538411,
+- "narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=",
++ "lastModified": 1744536153,
++ "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+- "rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221",
++ "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11",
+ "type": "github"
+ },
+ "original": {
+@@ -213,11 +210,11 @@
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+- "lastModified": 1688056373,
+- "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
++ "lastModified": 1686050334,
++ "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+- "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
++ "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc",
+ "type": "github"
+ },
+ "original": {
+@@ -231,7 +228,7 @@
+ "devenv": "devenv",
+ "nixpkgs": "nixpkgs_2",
+ "rust-overlay": "rust-overlay",
+- "systems": "systems_2"
++ "systems": "systems"
+ }
+ },
+ "rust-overlay": {
+@@ -239,11 +236,11 @@
+ "nixpkgs": "nixpkgs_3"
+ },
+ "locked": {
+- "lastModified": 1731897198,
+- "narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
++ "lastModified": 1748313401,
++ "narHash": "sha256-x5UuDKP2Ui/TresAngUo9U4Ss9xfOmN8dAXU8OrkZmA=",
+ "owner": "oxalica",
+ "repo": "rust-overlay",
+- "rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
++ "rev": "9c8ea175cf9af29edbcff121512e44092a8f37e4",
+ "type": "github"
+ },
+ "original": {
+@@ -266,21 +263,6 @@
+ "repo": "default",
+ "type": "github"
+ }
+- },
+- "systems_2": {
+- "locked": {
+- "lastModified": 1681028828,
+- "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+- "owner": "nix-systems",
+- "repo": "default",
+- "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+- "type": "github"
+- },
+- "original": {
+- "owner": "nix-systems",
+- "repo": "default",
+- "type": "github"
+- }
+ }
+ },
+ "root": "root",
+diff --git a/flake.nix b/flake.nix
+index 749c10da1d..e33b233ece 100644
+--- a/flake.nix
++++ b/flake.nix
+@@ -82,7 +82,7 @@
+ #
+ # NOTE: We currently need to set the Rust version unnecessarily high
+ # in order to work around https://github.com/matrix-org/synapse/issues/15939
+- (rust-bin.stable."1.82.0".default.override {
++ (rust-bin.stable."1.87.0".default.override {
+ # Additionally install the "rust-src" extension to allow diving into the
+ # Rust source code in an IDE (rust-analyzer will also make use of it).
+ extensions = [ "rust-src" ];
+@@ -118,6 +118,8 @@
+ # For releasing Synapse
+ debian-devscripts # (`dch` for manipulating the Debian changelog)
+ libnotify # (the release script uses `notify-send` to tell you when CI jobs are done)
++
++ postgresql.pg_config
+ ];
+
+ # Install Python and manage a virtualenv with Poetry.
+@@ -140,6 +142,9 @@
+ # force compiling those binaries locally instead.
+ env.POETRY_INSTALLER_NO_BINARY = "ruff";
+
++ # Required to make git work
++ env.CARGO_NET_GIT_FETCH_WITH_CLI = "true";
++
+ # Install dependencies for the additional programming languages
+ # involved with Synapse development.
+ #
+@@ -160,6 +165,9 @@
+ services.postgres.initialDatabases = [
+ { name = "synapse"; }
+ ];
++
++ services.postgres.port = 5433;
++
+ # Create a postgres user called 'synapse_user' which has ownership
+ # over the 'synapse' database.
+ services.postgres.initialScript = ''
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/0010-Fix-gitignore-to-ignore-.venv.patch b/packages/overlays/matrix-synapse/patches/0010-Fix-gitignore-to-ignore-.venv.patch
new file mode 100644
index 0000000..abe0eb3
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/0010-Fix-gitignore-to-ignore-.venv.patch
@@ -0,0 +1,24 @@
+From 35c7dfe6a8df912f14d4f18642e2af2675d607af Mon Sep 17 00:00:00 2001
+From: Rory& <root@rory.gay>
+Date: Mon, 9 Jun 2025 17:46:10 +0200
+Subject: [PATCH 10/10] Fix gitignore to ignore .venv
+
+---
+ .gitignore | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/.gitignore b/.gitignore
+index a89f149ec1..0567934c4e 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -30,6 +30,7 @@ __pycache__/
+ /*.signing.key
+ /env/
+ /.venv*/
++/.venv
+ /homeserver*.yaml
+ /logs
+ /media_store/
+--
+2.49.0
+
diff --git a/packages/overlays/matrix-synapse/patches/synapse-fast-links.patch b/packages/overlays/matrix-synapse/patches/synapse-fast-links.patch
new file mode 100644
index 0000000..c35ba87
--- /dev/null
+++ b/packages/overlays/matrix-synapse/patches/synapse-fast-links.patch
@@ -0,0 +1,87 @@
+diff --git a/synapse/storage/database.py b/synapse/storage/database.py
+index cb4a585..1196781 100644
+--- a/synapse/storage/database.py
++++ b/synapse/storage/database.py
+@@ -2503,6 +2503,49 @@ class DatabasePool:
+
+ return txn.fetchall()
+
++# requires database_engine.supports_using_any_list to be true
++def make_select_id_if_found_sql_clause(
++ database_engine: BaseDatabaseEngine,
++ column: str,
++ table: str,
++ iterable: Collection[Any],
++ *,
++ negative: bool = False,
++) -> Tuple[str, list]:
++ """Returns an SQL clause that checks the given column is in the iterable.
++
++ On SQLite this expands to `column IN (?, ?, ...)`, whereas on Postgres
++ it expands to `column = ANY(?)`. While both DBs support the `IN` form,
++ using the `ANY` form on postgres means that it views queries with
++ different length iterables as the same, helping the query stats.
++
++ Args:
++ database_engine
++ column: Name of the column
++ table: Name of the table
++ iterable: The values to check the column against.
++ negative: Whether we should check for inequality, i.e. `NOT IN`
++
++ Returns:
++ A tuple of SQL query and the args
++ """
++ # This should hopefully be faster, but also makes postgres query
++ # stats easier to understand.
++ if database_engine.supports_using_any_list:
++ if not negative:
++ clause = f"{column}_lookup AS {column} FROM UNNEST(?::bigint[]) {column}_lookup WHERE EXISTS(SELECT FROM {table} WHERE {column}={column}_lookup)"
++ else:
++ clause = f"{column}_lookup AS {column} FROM UNNEST(?::bigint[]) {column}_lookup WHERE NOT EXISTS(SELECT FROM {table} WHERE {column}={column}_lookup)"
++
++ return clause, [list(iterable)]
++ else:
++ params = ",".join("?" for _ in iterable)
++ if not negative:
++ clause = f"DISTINCT {column} FROM {table} WHERE {column} IN ({params})"
++ else:
++ clause = f"DISTINCT {column} FROM {table} WHERE {column} NOT IN ({params})"
++ return clause, list(iterable)
++
+
+ def make_in_list_sql_clause(
+ database_engine: BaseDatabaseEngine,
+diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
+index 46aa590..026f011 100644
+--- a/synapse/storage/databases/main/event_federation.py
++++ b/synapse/storage/databases/main/event_federation.py
+@@ -52,6 +52,7 @@ from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
++ make_select_id_if_found_sql_clause,
+ )
+ from synapse.storage.databases.main.events_worker import EventsWorkerStore
+ from synapse.storage.databases.main.signatures import SignatureWorkerStore
+@@ -362,8 +363,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
+ sql = """
+ WITH RECURSIVE links(chain_id) AS (
+ SELECT
+- DISTINCT origin_chain_id
+- FROM event_auth_chain_links WHERE %s
++ %s
+ UNION
+ SELECT
+ target_chain_id
+@@ -380,8 +380,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
+ while chains_to_fetch:
+ batch2 = tuple(itertools.islice(chains_to_fetch, 1000))
+ chains_to_fetch.difference_update(batch2)
+- clause, args = make_in_list_sql_clause(
+- txn.database_engine, "origin_chain_id", batch2
++ clause, args = make_select_id_if_found_sql_clause(
++ txn.database_engine, "origin_chain_id", "event_auth_chain_links", batch2
+ )
+ txn.execute(sql % (clause,), args)
+
|