diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index ff150f0be7..c342df2a8b 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -42,8 +42,8 @@ from synapse.storage.database import DatabasePool
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.types import Collection, get_domain_from_id
-from synapse.util.caches.deferred_cache import DeferredCache
from synapse.util.caches.descriptors import cached
+from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure
@@ -137,7 +137,7 @@ class EventsWorkerStore(SQLBaseStore):
db_conn, "events", "stream_ordering", step=-1
)
- if not hs.config.worker.worker_app:
+ if hs.config.run_background_tasks:
# We periodically clean out old transaction ID mappings
self._clock.looping_call(
run_as_background_process,
@@ -146,11 +146,10 @@ class EventsWorkerStore(SQLBaseStore):
self._cleanup_old_transaction_ids,
)
- self._get_event_cache = DeferredCache(
- "*getEvent*",
+ self._get_event_cache = LruCache(
+ cache_name="*getEvent*",
keylen=3,
- max_entries=hs.config.caches.event_cache_size,
- apply_cache_factor_from_config=False,
+ max_size=hs.config.caches.event_cache_size,
)
self._event_fetch_lock = threading.Condition()
@@ -749,7 +748,7 @@ class EventsWorkerStore(SQLBaseStore):
event=original_ev, redacted_event=redacted_event
)
- self._get_event_cache.prefill((event_id,), cache_entry)
+ self._get_event_cache.set((event_id,), cache_entry)
result_map[event_id] = cache_entry
return result_map
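For context, a minimal sketch (not part of the patch) of the API difference this change relies on: `LruCache` is a plain synchronous map, so the `prefill()`/Deferred-based `DeferredCache` calls become `set()`/`get()`. The constructor arguments mirror the diff; the `max_size` value and identifiers below are placeholders.

```python
from synapse.util.caches.lrucache import LruCache

event_cache = LruCache(
    cache_name="*getEvent*",
    keylen=3,
    max_size=10000,  # placeholder for hs.config.caches.event_cache_size
)

event_id = "$example_event:server"
cache_entry = object()  # placeholder for the entry built in _get_events_from_db

# Entries now go in via set() (previously DeferredCache.prefill()) ...
event_cache.set((event_id,), cache_entry)
# ... and come back synchronously via get(), with no Deferred to await.
assert event_cache.get((event_id,), None) is cache_entry
```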