diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index 1adc92eb90..dd392cf694 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -283,7 +283,9 @@ class DeferredCache(Generic[KT, VT]):
# we return a new Deferred which will be called before any subsequent observers.
return observable.observe()
- def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None):
+ def prefill(
+ self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None
+ ):
callbacks = [callback] if callback else []
self.cache.set(key, value, callbacks=callbacks)
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index e15f7ee698..4dc3477e89 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -15,40 +15,50 @@
import logging
from collections import OrderedDict
+from typing import Any, Generic, Optional, TypeVar, Union, overload
+
+import attr
+from typing_extensions import Literal
from synapse.config import cache as cache_config
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import Clock
from synapse.util.caches import register_cache
logger = logging.getLogger(__name__)
-SENTINEL = object()
+SENTINEL = object() # type: Any
+
+T = TypeVar("T")
+KT = TypeVar("KT")
+VT = TypeVar("VT")
-class ExpiringCache:
+
+class ExpiringCache(Generic[KT, VT]):
def __init__(
self,
- cache_name,
- clock,
- max_len=0,
- expiry_ms=0,
- reset_expiry_on_get=False,
- iterable=False,
+ cache_name: str,
+ clock: Clock,
+ max_len: int = 0,
+ expiry_ms: int = 0,
+ reset_expiry_on_get: bool = False,
+ iterable: bool = False,
):
"""
Args:
- cache_name (str): Name of this cache, used for logging.
- clock (Clock)
- max_len (int): Max size of dict. If the dict grows larger than this
+ cache_name: Name of this cache, used for logging.
+ clock
+ max_len: Max size of dict. If the dict grows larger than this
then the oldest items get automatically evicted. Default is 0,
which indicates there is no max limit.
- expiry_ms (int): How long before an item is evicted from the cache
+ expiry_ms: How long before an item is evicted from the cache
in milliseconds. Default is 0, indicating items never get
evicted based on time.
- reset_expiry_on_get (bool): If true, will reset the expiry time for
+ reset_expiry_on_get: If true, will reset the expiry time for
an item on access. Defaults to False.
- iterable (bool): If true, the size is calculated by summing the
+ iterable: If true, the size is calculated by summing the
sizes of all entries, rather than the number of entries.
"""
self._cache_name = cache_name
@@ -62,7 +72,7 @@ class ExpiringCache:
self._expiry_ms = expiry_ms
self._reset_expiry_on_get = reset_expiry_on_get
- self._cache = OrderedDict()
+ self._cache = OrderedDict() # type: OrderedDict[KT, _CacheEntry]
self.iterable = iterable
@@ -79,12 +89,12 @@ class ExpiringCache:
self._clock.looping_call(f, self._expiry_ms / 2)
- def __setitem__(self, key, value):
+ def __setitem__(self, key: KT, value: VT) -> None:
now = self._clock.time_msec()
self._cache[key] = _CacheEntry(now, value)
self.evict()
- def evict(self):
+ def evict(self) -> None:
# Evict if there are now too many items
while self._max_size and len(self) > self._max_size:
_key, value = self._cache.popitem(last=False)
@@ -93,7 +103,7 @@ class ExpiringCache:
else:
self.metrics.inc_evictions()
- def __getitem__(self, key):
+ def __getitem__(self, key: KT) -> VT:
try:
entry = self._cache[key]
self.metrics.inc_hits()
@@ -106,7 +116,7 @@ class ExpiringCache:
return entry.value
- def pop(self, key, default=SENTINEL):
+ def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]:
"""Removes and returns the value with the given key from the cache.
If the key isn't in the cache then `default` will be returned if
@@ -115,29 +125,40 @@ class ExpiringCache:
Identical functionality to `dict.pop(..)`.
"""
- value = self._cache.pop(key, default)
+ value = self._cache.pop(key, SENTINEL)
+ # The key was not found.
if value is SENTINEL:
- raise KeyError(key)
+ if default is SENTINEL:
+ raise KeyError(key)
+ return default
- return value
+ return value.value
- def __contains__(self, key):
+ def __contains__(self, key: KT) -> bool:
return key in self._cache
- def get(self, key, default=None):
+ @overload
+ def get(self, key: KT, default: Literal[None] = None) -> Optional[VT]:
+ ...
+
+ @overload
+ def get(self, key: KT, default: T) -> Union[VT, T]:
+ ...
+
+ def get(self, key: KT, default: Optional[T] = None) -> Union[VT, Optional[T]]:
try:
return self[key]
except KeyError:
return default
- def setdefault(self, key, value):
+ def setdefault(self, key: KT, value: VT) -> VT:
try:
return self[key]
except KeyError:
self[key] = value
return value
- def _prune_cache(self):
+ def _prune_cache(self) -> None:
if not self._expiry_ms:
# zero expiry time means don't expire. This should never get called
# since we have this check in start too.
@@ -166,7 +187,7 @@ class ExpiringCache:
len(self),
)
- def __len__(self):
+ def __len__(self) -> int:
if self.iterable:
return sum(len(entry.value) for entry in self._cache.values())
else:
@@ -190,9 +211,7 @@ class ExpiringCache:
return False
+@attr.s(slots=True)
class _CacheEntry:
- __slots__ = ["time", "value"]
-
- def __init__(self, time, value):
- self.time = time
- self.value = value
+ time = attr.ib(type=int)
+ value = attr.ib()
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 60bb6ff642..20c8e2d9f5 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -57,12 +57,14 @@ def enumerate_leaves(node, depth):
class _Node:
__slots__ = ["prev_node", "next_node", "key", "value", "callbacks"]
- def __init__(self, prev_node, next_node, key, value, callbacks=set()):
+ def __init__(
+ self, prev_node, next_node, key, value, callbacks: Optional[set] = None
+ ):
self.prev_node = prev_node
self.next_node = next_node
self.key = key
self.value = value
- self.callbacks = callbacks
+ self.callbacks = callbacks or set()
class LruCache(Generic[KT, VT]):
@@ -176,10 +178,10 @@ class LruCache(Generic[KT, VT]):
self.len = synchronized(cache_len)
- def add_node(key, value, callbacks=set()):
+ def add_node(key, value, callbacks: Optional[set] = None):
prev_node = list_root
next_node = prev_node.next_node
- node = _Node(prev_node, next_node, key, value, callbacks)
+ node = _Node(prev_node, next_node, key, value, callbacks or set())
prev_node.next_node = node
next_node.prev_node = node
cache[key] = node
@@ -237,7 +239,7 @@ class LruCache(Generic[KT, VT]):
def cache_get(
key: KT,
default: Optional[T] = None,
- callbacks: Iterable[Callable[[], None]] = [],
+ callbacks: Iterable[Callable[[], None]] = (),
update_metrics: bool = True,
):
node = cache.get(key, None)
@@ -253,7 +255,7 @@ class LruCache(Generic[KT, VT]):
return default
@synchronized
- def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = []):
+ def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()):
node = cache.get(key, None)
if node is not None:
# We sometimes store large objects, e.g. dicts, which cause
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 1023c856d1..019cfa17cc 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -105,7 +105,13 @@ class Measure:
"start",
]
- def __init__(self, clock, name):
+ def __init__(self, clock, name: str):
+ """
+ Args:
+        clock: An object with a "time()" method, which returns the current
+ time in seconds.
+ name: The name of the metric to report.
+ """
self.clock = clock
self.name = name
curr_context = current_context()
@@ -118,10 +124,8 @@ class Measure:
else:
assert isinstance(curr_context, LoggingContext)
parent_context = curr_context
- self._logging_context = LoggingContext(
- "Measure[%s]" % (self.name,), parent_context
- )
- self.start = None
+ self._logging_context = LoggingContext(str(curr_context), parent_context)
+ self.start = None # type: Optional[int]
def __enter__(self) -> "Measure":
if self.start is not None:
|