author      Erik Johnston <erik@matrix.org>  2021-05-10 14:49:39 +0100
committer   Erik Johnston <erik@matrix.org>  2021-05-10 14:49:39 +0100
commit      c0a7348580b4311175e59e1be00b2bff850ee0f7 (patch)
tree        17a7ddbc6fc3b273aef383bb90b098037da676e0
parent      Newsfile (diff)
download    synapse-c0a7348580b4311175e59e1be00b2bff850ee0f7.tar.xz
Handle deduplicating multiple fetch event requests
-rw-r--r--  synapse/storage/databases/main/events_worker.py  17
1 file changed, 16 insertions(+), 1 deletion(-)
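
The change below consults an in-memory record of events that another in-flight request has already built, so the same database row is not deserialised twice and the already-built event is promoted straight into the event cache. A minimal sketch of that pattern follows, using hypothetical names (in_memory_events, event_cache, fetch_rows_from_db and build_event are illustrative stand-ins, not Synapse's real API):

from typing import Dict


class Event:
    """A tiny stand-in for Synapse's EventBase, holding just an ID and a body."""

    def __init__(self, event_id: str, body: str) -> None:
        self.event_id = event_id
        self.body = body


# Events that a concurrent fetch has already built, keyed by event ID.
in_memory_events: Dict[str, Event] = {}

# A plain dict standing in for the LRU cache of parsed events.
event_cache: Dict[str, Event] = {}


def fetch_rows_from_db(event_ids):
    """Placeholder for the database read; returns raw rows keyed by event ID."""
    return {event_id: {"json": "raw json for " + event_id} for event_id in event_ids}


def build_event(event_id: str, row: Dict[str, str]) -> Event:
    """Placeholder for deserialising a database row into an event object."""
    return Event(event_id, row["json"])


def get_events(event_ids):
    result: Dict[str, Event] = {}
    rows = fetch_rows_from_db(event_ids)
    for event_id, row in rows.items():
        # If a concurrent request has already built this event, reuse it and
        # refresh the cache instead of deserialising the row a second time.
        cached = in_memory_events.get(event_id)
        if cached is not None:
            result[event_id] = cached
            event_cache[event_id] = cached
            continue

        event = build_event(event_id, row)
        in_memory_events[event_id] = event
        event_cache[event_id] = event
        result[event_id] = event
    return result


# Example: both calls return the same Event object for "$a" because the second
# call hits in_memory_events before rebuilding it.
first = get_events(["$a"])
second = get_events(["$a"])
assert first["$a"] is second["$a"]

In the actual change, result_map plays the role of result above, and any event found this way is skipped again in the later redaction loop.
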
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 66eaf946d7..430dad1b9b 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -721,6 +721,9 @@ class EventsWorkerStore(SQLBaseStore):
             if events_to_fetch:
                 logger.debug("Also fetching redaction events %s", events_to_fetch)
 
+        # The events to return
+        result_map = {}  # type: Dict[str, _EventCacheEntry]
+
         # build a map from event_id to EventBase
         event_map = {}
         for event_id, row in fetched_events.items():
@@ -733,6 +736,16 @@ class EventsWorkerStore(SQLBaseStore):
             if not allow_rejected and rejected_reason:
                 continue
 
+            # Check whether we already have this event in memory. This can
+            # happen when multiple requests for the same event run at the
+            # same time. (Ideally we'd make it so that this doesn't happen,
+            # but that would require a larger refactor.)
+            cached_entry = self._in_memory_events.get(event_id)
+            if cached_entry is not None:
+                result_map[event_id] = cached_entry
+                self._get_event_cache.set((event_id,), cached_entry)
+                continue
+
             # If the event or metadata cannot be parsed, log the error and act
             # as if the event is unknown.
             try:
@@ -826,8 +839,10 @@ class EventsWorkerStore(SQLBaseStore):
 
         # finally, we can decide whether each one needs redacting, and build
         # the cache entries.
-        result_map = {}
         for event_id, original_ev in event_map.items():
+            if event_id in result_map:
+                continue
+
             redactions = fetched_events[event_id]["redactions"]
             redacted_event = self._maybe_redact_event_row(
                 original_ev, redactions, event_map