From 69048f7b4848ab6a4ae6cb233f8cbf36d73c0ba1 Mon Sep 17 00:00:00 2001
From: Shay
Date: Tue, 22 Aug 2023 07:15:34 -0700
Subject: Add an admin endpoint to allow authorizing server to signal token revocations (#16125)

---
 synapse/replication/tcp/client.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'synapse/replication')

diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 139f57cf86..04e8cff6ea 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -26,6 +26,7 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yielda
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.streams import (
     AccountDataStream,
+    CachesStream,
     DeviceListsStream,
     PushersStream,
     PushRulesStream,
@@ -73,6 +74,7 @@ class ReplicationDataHandler:
         self._instance_name = hs.get_instance_name()
         self._typing_handler = hs.get_typing_handler()
         self._state_storage_controller = hs.get_storage_controllers().state
+        self.auth = hs.get_auth()
 
         self._notify_pushers = hs.config.worker.start_pushers
         self._pusher_pool = hs.get_pusherpool()
@@ -218,6 +220,16 @@ class ReplicationDataHandler:
                 self._state_storage_controller.notify_event_un_partial_stated(
                     row.event_id
                 )
+        # invalidate the introspection token cache
+        elif stream_name == CachesStream.NAME:
+            for row in rows:
+                if row.cache_func == "introspection_token_invalidation":
+                    if row.keys[0] is None:
+                        # invalidate the whole cache
+                        # mypy ignore - the token cache is defined on MSC3861DelegatedAuth
+                        self.auth.invalidate_token_cache()  # type: ignore[attr-defined]
+                    else:
+                        self.auth.invalidate_cached_tokens(row.keys)  # type: ignore[attr-defined]
 
         await self._presence_handler.process_replication_rows(
             stream_name, instance_name, token, rows
--
cgit 1.5.1
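The hunk above teaches workers to watch the caches replication stream: when a row for the "introspection_token_invalidation" cache function arrives, a leading None key clears the whole introspection token cache, while concrete keys drop only those tokens. The following standalone sketch mirrors that dispatch pattern against a plain dict-backed cache; CacheRow, TokenCache and on_caches_rows are hypothetical names used for illustration only, not Synapse classes.

    # Minimal sketch of the caches-stream token invalidation pattern, under the
    # assumption of a simple dict-backed cache. Not the Synapse implementation.
    from dataclasses import dataclass
    from typing import Optional, Sequence


    @dataclass
    class CacheRow:
        # Mirrors the two fields the replication handler inspects on each row.
        cache_func: str
        keys: Sequence[Optional[str]]


    class TokenCache:
        def __init__(self) -> None:
            self._cache: dict = {}

        def put(self, token: str, introspection_result: object) -> None:
            self._cache[token] = introspection_result

        def invalidate_token_cache(self) -> None:
            # Drop everything: the revoked tokens are not individually identified.
            self._cache.clear()

        def invalidate_cached_tokens(self, keys: Sequence[Optional[str]]) -> None:
            # Drop only the named tokens, leaving the rest of the cache warm.
            for key in keys:
                self._cache.pop(key, None)


    def on_caches_rows(cache: TokenCache, rows: Sequence[CacheRow]) -> None:
        for row in rows:
            if row.cache_func == "introspection_token_invalidation":
                if row.keys[0] is None:
                    # A None key means "invalidate the whole cache".
                    cache.invalidate_token_cache()
                else:
                    cache.invalidate_cached_tokens(row.keys)


    if __name__ == "__main__":
        cache = TokenCache()
        cache.put("tok_a", {"active": True})
        cache.put("tok_b", {"active": True})
        on_caches_rows(cache, [CacheRow("introspection_token_invalidation", ["tok_a"])])
        assert "tok_a" not in cache._cache and "tok_b" in cache._cache
        on_caches_rows(cache, [CacheRow("introspection_token_invalidation", [None])])
        assert not cache._cache

In the real handler the token cache lives on the MSC3861 delegated auth provider, which is why the calls in the diff carry the type: ignore[attr-defined] annotations.
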
From 803f63df1c52237a23cb68c1b2a8402200a7216d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 22 Aug 2023 16:11:22 +0100
Subject: Fix perf of `wait_for_stream_positions` (#16148)

---
 changelog.d/16148.bugfix          |  1 +
 synapse/replication/tcp/client.py | 19 ++++++++++++-------
 2 files changed, 13 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/16148.bugfix

(limited to 'synapse/replication')

diff --git a/changelog.d/16148.bugfix b/changelog.d/16148.bugfix
new file mode 100644
index 0000000000..fea316f856
--- /dev/null
+++ b/changelog.d/16148.bugfix
@@ -0,0 +1 @@
+Fix performance degredation when there are a lot of in-flight replication requests.

diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 04e8cff6ea..3b88dc68ea 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -14,7 +14,9 @@
 """A replication client for use by synapse workers.
 """
 import logging
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple
+
+from sortedcontainers import SortedList
 
 from twisted.internet import defer
 from twisted.internet.defer import Deferred
@@ -86,7 +88,9 @@ class ReplicationDataHandler:
 
         # Map from stream and instance to list of deferreds waiting for the stream to
         # arrive at a particular position. The lists are sorted by stream position.
-        self._streams_to_waiters: Dict[Tuple[str, str], List[Tuple[int, Deferred]]] = {}
+        self._streams_to_waiters: Dict[
+            Tuple[str, str], SortedList[Tuple[int, Deferred]]
+        ] = {}
 
     async def on_rdata(
         self, stream_name: str, instance_name: str, token: int, rows: list
@@ -238,7 +242,9 @@ class ReplicationDataHandler:
         # Notify any waiting deferreds. The list is ordered by position so we
         # just iterate through the list until we reach a position that is
         # greater than the received row position.
-        waiting_list = self._streams_to_waiters.get((stream_name, instance_name), [])
+        waiting_list = self._streams_to_waiters.get((stream_name, instance_name))
+        if not waiting_list:
+            return
 
         # Index of first item with a position after the current token, i.e we
         # have called all deferreds before this index. If not overwritten by
@@ -262,7 +268,7 @@ class ReplicationDataHandler:
 
         # Drop all entries in the waiting list that were called in the above
         # loop. (This maintains the order so no need to resort)
-        waiting_list[:] = waiting_list[index_of_first_deferred_not_called:]
+        del waiting_list[:index_of_first_deferred_not_called]
 
         for deferred in deferreds_to_callback:
             try:
@@ -322,11 +328,10 @@ class ReplicationDataHandler:
         )
 
         waiting_list = self._streams_to_waiters.setdefault(
-            (stream_name, instance_name), []
+            (stream_name, instance_name), SortedList(key=lambda t: t[0])
         )
 
-        waiting_list.append((position, deferred))
-        waiting_list.sort(key=lambda t: t[0])
+        waiting_list.add((position, deferred))
 
         # We measure here to get in flight counts and average waiting time.
         with Measure(self._clock, "repl.wait_for_stream_position"):
--
cgit 1.5.1
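The change above replaces an append-then-sort waiter list with a sortedcontainers SortedList keyed on stream position, so registering a waiter becomes a logarithmic insert rather than a full re-sort on every call, and the already-notified prefix can be dropped with a single slice delete. The sketch below shows that pattern in isolation; make_waiting_list and notify_up_to are hypothetical helper names, and a plain string stands in for the Deferred so the example stays self-contained.

    # Standalone sketch of the SortedList waiter pattern (pip install sortedcontainers).
    # Assumes waiters are (stream position, payload) tuples; not the Synapse code itself.
    from typing import List

    from sortedcontainers import SortedList


    def make_waiting_list() -> SortedList:
        # key=lambda t: t[0] orders entries by stream position only, so the second
        # tuple element is never compared (Deferreds are not orderable).
        return SortedList(key=lambda t: t[0])


    def notify_up_to(waiting_list: SortedList, token: int) -> List[str]:
        # Entries are kept sorted by position, so everything to notify is a prefix.
        called = []
        index_of_first_not_called = len(waiting_list)
        for i, (position, waiter) in enumerate(waiting_list):
            if position > token:
                index_of_first_not_called = i
                break
            called.append(waiter)
        # Drop the prefix we just handled; slice deletion keeps the rest sorted.
        del waiting_list[:index_of_first_not_called]
        return called


    if __name__ == "__main__":
        waiters = make_waiting_list()
        for pos, name in [(5, "a"), (3, "b"), (9, "c")]:
            # add() is a logarithmic insert, versus append() plus a full sort()
            # every time a waiter is registered.
            waiters.add((pos, name))
        assert notify_up_to(waiters, 6) == ["b", "a"]  # positions 3 and 5, in order
        assert [pos for pos, _ in waiters] == [9]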