diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index badf9a5f40..c1bf98cdcb 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -251,13 +251,11 @@ class SQLBaseStore(object):
self._txn_perf_counters = PerformanceCounters()
self._get_event_counters = PerformanceCounters()
- self._get_event_cache = LruCache(hs.config.event_cache_size)
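+ # Named Cache keyed on (event_id, check_redacted, get_prev_content); replaces the raw LruCache and the manual caches_by_name registration.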
+ self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
+ max_entries=hs.config.event_cache_size)
self.database_engine = hs.database_engine
- # Pretend the getEventCache is just another named cache
- caches_by_name["*getEvent*"] = self._get_event_cache
-
self._stream_id_gen = StreamIdGenerator()
self._transaction_id_gen = IdGenerator("sent_transactions", "id", self)
self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
@@ -768,6 +766,12 @@ class SQLBaseStore(object):
return [e for e in events if e]
+ def _invalidate_get_event_cache(self, event_id):
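+ # The cache key is (event_id, check_redacted, get_prev_content), so clear every flag combination for this event.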
+ for check_redacted in (False, True):
+ for get_prev_content in (False, True):
+ self._get_event_cache.invalidate(event_id, check_redacted,
+ get_prev_content)
+
def _get_event_txn(self, txn, event_id, check_redacted=True,
get_prev_content=False, allow_rejected=False):
@@ -778,16 +782,14 @@ class SQLBaseStore(object):
sql_getevents_timer.inc_by(curr_time - last_time, desc)
return curr_time
- cache = self._get_event_cache.setdefault(event_id, {})
-
try:
- # Separate cache entries for each way to invoke _get_event_txn
- ret = cache[(check_redacted, get_prev_content, allow_rejected)]
+ ret = self._get_event_cache.get(event_id, check_redacted, get_prev_content)
- cache_counter.inc_hits("*getEvent*")
- return ret
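+ # The cache holds rejected events too; only return them when the caller explicitly allows it.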
+ if allow_rejected or not ret.rejected_reason:
+ return ret
+ else:
+ return None
except KeyError:
- cache_counter.inc_misses("*getEvent*")
pass
finally:
start_time = update_counter("event_cache", start_time)
@@ -812,19 +814,22 @@ class SQLBaseStore(object):
start_time = update_counter("select_event", start_time)
+ result = self._get_event_from_row_txn(
+ txn, internal_metadata, js, redacted,
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ rejected_reason=rejected_reason,
+ )
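+ # Cache the event whether or not it was rejected; the allow_rejected check below decides what to return.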
+ self._get_event_cache.prefill(event_id, check_redacted, get_prev_content, result)
+
if allow_rejected or not rejected_reason:
- result = self._get_event_from_row_txn(
- txn, internal_metadata, js, redacted,
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- )
- cache[(check_redacted, get_prev_content, allow_rejected)] = result
return result
else:
return None
def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
- check_redacted=True, get_prev_content=False):
+ check_redacted=True, get_prev_content=False,
+ rejected_reason=None):
start_time = time.time() * 1000
@@ -841,7 +846,11 @@ class SQLBaseStore(object):
internal_metadata = json.loads(str(internal_metadata).decode("utf8"))
start_time = update_counter("decode_internal", start_time)
- ev = FrozenEvent(d, internal_metadata_dict=internal_metadata)
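+ # Carry the rejection reason on the event itself so cache hits can honour allow_rejected.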
+ ev = FrozenEvent(
+ d,
+ internal_metadata_dict=internal_metadata,
+ rejected_reason=rejected_reason,
+ )
start_time = update_counter("build_frozen_event", start_time)
if check_redacted and redacted:
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index f066484c7e..a2e87c27ce 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -93,7 +93,7 @@ class EventsStore(SQLBaseStore):
current_state=None):
# Remove any existing cache entries for the event_id
- self._get_event_cache.pop(event.event_id)
+ self._invalidate_get_event_cache(event.event_id)
if stream_ordering is None:
with self._stream_id_gen.get_next_txn(txn) as stream_ordering:
@@ -356,7 +356,7 @@ class EventsStore(SQLBaseStore):
def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event
- self._get_event_cache.pop(event.redacts)
+ self._invalidate_get_event_cache(event.redacts)
txn.execute(
"INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
(event.event_id, event.redacts)