# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import weakref
from functools import wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generic,
Iterable,
List,
Optional,
Type,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import Literal
from twisted.internet import reactor
from twisted.internet.interfaces import IReactorTime
from synapse.config import cache as cache_config
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.util import Clock, caches
from synapse.util.caches import CacheMetric, register_cache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
from synapse.util.linked_list import ListNode
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
try:
from pympler.asizeof import Asizer
    def _get_size_of(val: Any, *, recurse: bool = True) -> int:
"""Get an estimate of the size in bytes of the object.
Args:
val: The object to size.
recurse: If true will include referenced values in the size,
otherwise only sizes the given object.
"""
# Ignore singleton values when calculating memory usage.
if val in ((), None, ""):
return 0
sizer = Asizer()
sizer.exclude_refs((), None, "")
return sizer.asizeof(val, limit=100 if recurse else 0)
except ImportError:
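    # pympler is an optional dependency: when it's missing we report a size of
    # zero, which effectively turns memory-usage accounting into a no-op.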
    def _get_size_of(val: Any, *, recurse: bool = True) -> int:
        return 0
# Function type: the type used for invalidation callbacks
FT = TypeVar("FT", bound=Callable[..., Any])
# Key and Value type for the cache
KT = TypeVar("KT")
VT = TypeVar("VT")
# a general type var, distinct from either KT or VT
T = TypeVar("T")
def enumerate_leaves(node, depth):
    """Yield all nodes `depth` levels below `node` in a nested-dict tree.
    With depth == 0, `node` itself is yielded.
    """
if depth == 0:
yield node
else:
for n in node.values():
yield from enumerate_leaves(n, depth - 1)
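# For example, given the nested structure {"a": {"b": leaf}},
# enumerate_leaves(root, 2) yields `leaf`, while enumerate_leaves(root, 0)
# yields the root itself.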
P = TypeVar("P")
class _TimedListNode(ListNode[P]):
"""A `ListNode` that tracks last access time."""
__slots__ = ["last_access_ts_secs"]
def update_last_access(self, clock: Clock):
self.last_access_ts_secs = int(clock.time())
# Whether to insert new cache entries into the global list. We only add to it
# if time-based eviction is enabled.
USE_GLOBAL_LIST = False
# A linked list of all cache entries, allowing efficient time-based eviction.
GLOBAL_ROOT = ListNode["_Node"].create_root_node()
@wrap_as_background_process("LruCache._expire_old_entries")
async def _expire_old_entries(clock: Clock, expiry_seconds: int):
"""Walks the global cache list to find cache entries that haven't been
    accessed in the given number of seconds, and drops them from their caches.
    """
now = int(clock.time())
node = GLOBAL_ROOT.prev_node
assert node is not None
i = 0
logger.debug("Searching for stale caches")
while node is not GLOBAL_ROOT:
# Only the root node isn't a `_TimedListNode`.
assert isinstance(node, _TimedListNode)
if node.last_access_ts_secs > now - expiry_seconds:
break
cache_entry = node.get_cache_entry()
next_node = node.prev_node
# The node should always have a reference to a cache entry and a valid
# `prev_node`, as we only drop them when we remove the node from the
# list.
assert next_node is not None
assert cache_entry is not None
cache_entry.drop_from_cache()
# If we do lots of work at once we yield to allow other stuff to happen.
if (i + 1) % 10000 == 0:
logger.debug("Waiting during drop")
await clock.sleep(0)
logger.debug("Waking during drop")
node = next_node
        # If we've yielded then our current node may have been evicted, so we
        # need to check that it's still valid.
if node.prev_node is None:
break
i += 1
logger.info("Dropped %d items from caches", i)
def setup_expire_lru_cache_entries(hs: "HomeServer"):
"""Start a background job that expires all cache entries if they have not
been accessed for the given number of seconds.
"""
if not hs.config.caches.expiry_time_msec:
return
logger.info(
"Expiring LRU caches after %d seconds", hs.config.caches.expiry_time_msec / 1000
)
global USE_GLOBAL_LIST
USE_GLOBAL_LIST = True
clock = hs.get_clock()
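    # Walk the global list every 30 seconds, dropping anything that hasn't
    # been accessed within the configured expiry time.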
clock.looping_call(
_expire_old_entries, 30 * 1000, clock, hs.config.caches.expiry_time_msec / 1000
)
class _Node:
__slots__ = [
"_list_node",
"_global_list_node",
"_cache",
"key",
"value",
"callbacks",
"memory",
]
def __init__(
self,
root: "ListNode[_Node]",
key,
value,
cache: "weakref.ReferenceType[LruCache]",
clock: Clock,
callbacks: Collection[Callable[[], None]] = (),
):
self._list_node = ListNode.insert_after(self, root)
self._global_list_node = None
if USE_GLOBAL_LIST:
self._global_list_node = _TimedListNode.insert_after(self, GLOBAL_ROOT)
self._global_list_node.update_last_access(clock)
# We store a weak reference to the cache object so that this _Node can
# remove itself from the cache. If the cache is dropped we ensure we
# remove our entries in the lists.
self._cache = cache
self.key = key
self.value = value
# Set of callbacks to run when the node gets deleted. We store as a list
# rather than a set to keep memory usage down (and since we expect few
# entries per node, the performance of checking for duplication in a
# list vs using a set is negligible).
#
# Note that we store this as an optional list to keep the memory
        # footprint down. Storing `None` is free as it's a singleton, while empty
# lists are 56 bytes (and empty sets are 216 bytes, if we did the naive
# thing and used sets).
self.callbacks: Optional[List[Callable[[], None]]] = None
self.add_callbacks(callbacks)
self.memory = 0
if caches.TRACK_MEMORY_USAGE:
self.memory = (
_get_size_of(key)
+ _get_size_of(value)
+ _get_size_of(self._list_node, recurse=False)
+ _get_size_of(self.callbacks, recurse=False)
+ _get_size_of(self, recurse=False)
)
self.memory += _get_size_of(self.memory, recurse=False)
if self._global_list_node:
self.memory += _get_size_of(self._global_list_node, recurse=False)
self.memory += _get_size_of(self._global_list_node.last_access_ts_secs)
def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None:
"""Add to stored list of callbacks, removing duplicates."""
if not callbacks:
return
if not self.callbacks:
self.callbacks = []
for callback in callbacks:
if callback not in self.callbacks:
self.callbacks.append(callback)
def run_and_clear_callbacks(self) -> None:
"""Run all callbacks and clear the stored list of callbacks. Used when
the node is being deleted.
"""
if not self.callbacks:
return
for callback in self.callbacks:
callback()
self.callbacks = None
def drop_from_cache(self) -> None:
"""Drop this node from the cache.
Ensures that the entry gets removed from the cache and that we get
removed from all lists.
"""
cache = self._cache()
if not cache or not cache.pop(self.key, None):
# `cache.pop` should call `drop_from_lists()`, unless this Node had
# already been removed from the cache.
self.drop_from_lists()
def drop_from_lists(self) -> None:
"""Remove this node from the cache lists."""
self._list_node.remove_from_list()
if self._global_list_node:
self._global_list_node.remove_from_list()
    def move_to_front(self, clock: Clock, cache_list_root: ListNode) -> None:
        """Moves this node to the front of all the lists it's in."""
self._list_node.move_after(cache_list_root)
if self._global_list_node:
self._global_list_node.move_after(GLOBAL_ROOT)
self._global_list_node.update_last_access(clock)
class LruCache(Generic[KT, VT]):
"""
Least-recently-used cache, supporting prometheus metrics and invalidation callbacks.
If cache_type=TreeCache, all keys must be tuples.
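    A minimal usage sketch (no metrics, global cache factor disabled):

        cache: LruCache[str, int] = LruCache(
            max_size=10, apply_cache_factor_from_config=False
        )
        cache["foo"] = 1
        assert cache.get("foo") == 1
        cache.invalidate("foo")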
"""
def __init__(
self,
max_size: int,
cache_name: Optional[str] = None,
cache_type: Type[Union[dict, TreeCache]] = dict,
size_callback: Optional[Callable] = None,
metrics_collection_callback: Optional[Callable[[], None]] = None,
apply_cache_factor_from_config: bool = True,
clock: Optional[Clock] = None,
):
"""
Args:
            max_size: The maximum number of entries the cache can hold
cache_name: The name of this cache, for the prometheus metrics. If unset,
no metrics will be reported on this cache.
cache_type (type):
type of underlying cache to be used. Typically one of dict
or TreeCache.
            size_callback (func(V) -> int | None):
                if not None, called with each entry's value to determine how much
                it contributes to the cache's total size; otherwise each entry
                counts as 1.
metrics_collection_callback:
metrics collection callback. This is called early in the metrics
collection process, before any of the metrics registered with the
prometheus Registry are collected, so can be used to update any dynamic
metrics.
Ignored if cache_name is None.
apply_cache_factor_from_config (bool): If true, `max_size` will be
                multiplied by a cache factor derived from the homeserver config.
"""
        # Default `clock` to something sensible. Note that we rename it to
        # `real_clock` so that mypy doesn't think it's still `Optional`.
if clock is None:
real_clock = Clock(cast(IReactorTime, reactor))
else:
real_clock = clock
cache = cache_type()
self.cache = cache # Used for introspection.
self.apply_cache_factor_from_config = apply_cache_factor_from_config
# Save the original max size, and apply the default size factor.
self._original_max_size = max_size
# We previously didn't apply the cache factor here, and as such some caches were
# not affected by the global cache factor. Add an option here to disable applying
# the cache factor when a cache is created
if apply_cache_factor_from_config:
self.max_size = int(max_size * cache_config.properties.default_factor_size)
else:
self.max_size = int(max_size)
# register_cache might call our "set_cache_factor" callback; there's nothing to
# do yet when we get resized.
self._on_resize: Optional[Callable[[], None]] = None
if cache_name is not None:
metrics: Optional[CacheMetric] = register_cache(
"lru_cache",
cache_name,
self,
collect_callback=metrics_collection_callback,
)
else:
metrics = None
# this is exposed for access from outside this class
self.metrics = metrics
# We create a single weakref to self here so that we don't need to keep
# creating more each time we create a `_Node`.
weak_ref_to_self = weakref.ref(self)
list_root = ListNode[_Node].create_root_node()
lock = threading.Lock()
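        # All mutation of the cache and its linked lists happens while holding
        # this lock, via the `synchronized` decorator defined below.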
def evict() -> None:
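            # Walk from the oldest end of the list, dropping entries until the
            # cache is back within its maximum size.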
while cache_len() > self.max_size:
# Get the last node in the list (i.e. the oldest node).
todelete = list_root.prev_node
# The list root should always have a valid `prev_node` if the
# cache is not empty.
assert todelete is not None
# The node should always have a reference to a cache entry, as
# we only drop the cache entry when we remove the node from the
# list.
node = todelete.get_cache_entry()
assert node is not None
evicted_len = delete_node(node)
cache.pop(node.key, None)
if metrics:
metrics.inc_evictions(evicted_len)
def synchronized(f: FT) -> FT:
@wraps(f)
def inner(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return cast(FT, inner)
cached_cache_len = [0]
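        # The total size is kept in a one-element list so that the nested
        # closures below can mutate it.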
if size_callback is not None:
def cache_len():
return cached_cache_len[0]
else:
def cache_len():
return len(cache)
def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()):
node = _Node(list_root, key, value, weak_ref_to_self, real_clock, callbacks)
cache[key] = node
if size_callback:
cached_cache_len[0] += size_callback(node.value)
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.inc_memory_usage(node.memory)
def move_node_to_front(node: _Node):
node.move_to_front(real_clock, list_root)
def delete_node(node: _Node) -> int:
node.drop_from_lists()
deleted_len = 1
if size_callback:
deleted_len = size_callback(node.value)
cached_cache_len[0] -= deleted_len
node.run_and_clear_callbacks()
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.dec_memory_usage(node.memory)
return deleted_len
@overload
def cache_get(
key: KT,
default: Literal[None] = None,
callbacks: Collection[Callable[[], None]] = ...,
update_metrics: bool = ...,
) -> Optional[VT]:
...
@overload
def cache_get(
key: KT,
default: T,
callbacks: Collection[Callable[[], None]] = ...,
update_metrics: bool = ...,
) -> Union[T, VT]:
...
@synchronized
def cache_get(
key: KT,
default: Optional[T] = None,
callbacks: Collection[Callable[[], None]] = (),
update_metrics: bool = True,
):
node = cache.get(key, None)
if node is not None:
move_node_to_front(node)
node.add_callbacks(callbacks)
if update_metrics and metrics:
metrics.inc_hits()
return node.value
else:
if update_metrics and metrics:
metrics.inc_misses()
return default
@synchronized
def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()):
node = cache.get(key, None)
if node is not None:
# We sometimes store large objects, e.g. dicts, which cause
# the inequality check to take a long time. So let's only do
# the check if we have some callbacks to call.
                if node.callbacks and value != node.value:
                    node.run_and_clear_callbacks()
# We don't bother to protect this by value != node.value as
# generally size_callback will be cheap compared with equality
# checks. (For example, taking the size of two dicts is quicker
# than comparing them for equality.)
if size_callback:
cached_cache_len[0] -= size_callback(node.value)
cached_cache_len[0] += size_callback(value)
node.add_callbacks(callbacks)
move_node_to_front(node)
node.value = value
else:
add_node(key, value, set(callbacks))
evict()
@synchronized
def cache_set_default(key: KT, value: VT) -> VT:
node = cache.get(key, None)
if node is not None:
return node.value
else:
add_node(key, value)
evict()
return value
@overload
def cache_pop(key: KT, default: Literal[None] = None) -> Optional[VT]:
...
@overload
def cache_pop(key: KT, default: T) -> Union[T, VT]:
...
@synchronized
def cache_pop(key: KT, default: Optional[T] = None):
node = cache.get(key, None)
if node:
delete_node(node)
cache.pop(node.key, None)
return node.value
else:
return default
@synchronized
        def cache_del_multi(key: KT) -> None:
            """Delete an entry, or tree of entries.
            If the LruCache is backed by a regular dict, then "key" must be of
            the right type for this cache.
If the LruCache is backed by a TreeCache, then "key" must be a tuple, but
may be of lower cardinality than the TreeCache - in which case the whole
subtree is deleted.
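            For example, with a TreeCache keyed by (room_id, user_id) tuples,
            passing (room_id,) deletes every entry for that room.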
"""
popped = cache.pop(key, None)
if popped is None:
return
# for each deleted node, we now need to remove it from the linked list
# and run its callbacks.
for leaf in iterate_tree_cache_entry(popped):
delete_node(leaf)
@synchronized
def cache_clear() -> None:
for node in cache.values():
node.run_and_clear_callbacks()
node.drop_from_lists()
assert list_root.next_node == list_root
assert list_root.prev_node == list_root
cache.clear()
if size_callback:
cached_cache_len[0] = 0
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.clear_memory_usage()
@synchronized
def cache_contains(key: KT) -> bool:
return key in cache
self.sentinel = object()
# make sure that we clear out any excess entries after we get resized.
self._on_resize = evict
self.get = cache_get
self.set = cache_set
self.setdefault = cache_set_default
self.pop = cache_pop
self.del_multi = cache_del_multi
# `invalidate` is exposed for consistency with DeferredCache, so that it can be
# invalidated by the cache invalidation replication stream.
self.invalidate = cache_del_multi
self.len = synchronized(cache_len)
self.contains = cache_contains
self.clear = cache_clear
def __getitem__(self, key):
result = self.get(key, self.sentinel)
if result is self.sentinel:
            raise KeyError(key)
else:
return result
def __setitem__(self, key, value):
self.set(key, value)
    def __delitem__(self, key):
        result = self.pop(key, self.sentinel)
        if result is self.sentinel:
            raise KeyError(key)
def __len__(self):
return self.len()
def __contains__(self, key):
return self.contains(key)
def set_cache_factor(self, factor: float) -> bool:
"""
Set the cache factor for this individual cache.
This will trigger a resize if it changes, which may require evicting
items from the cache.
Returns:
bool: Whether the cache changed size or not.
"""
if not self.apply_cache_factor_from_config:
return False
new_size = int(self._original_max_size * factor)
if new_size != self.max_size:
self.max_size = new_size
if self._on_resize:
self._on_resize()
return True
return False
def __del__(self) -> None:
# We're about to be deleted, so we make sure to clear up all the nodes
# and run callbacks, etc.
#
# This happens e.g. in the sync code where we have an expiring cache of
# lru caches.
self.clear()