-rw-r--r--  Cargo.toml                       |  4
-rw-r--r--  rust/src/lru_cache.rs            | 38
-rw-r--r--  synapse/util/caches/lrucache.py  | 28
3 files changed, 37 insertions, 33 deletions
diff --git a/Cargo.toml b/Cargo.toml
index de141bdee9..f2f3f4d9e3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,3 +3,7 @@
 
 [workspace]
 members = ["rust"]
+
+
+[profile.release]
+debug = true
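
The new `[profile.release]` section turns on `debug = true`, which keeps full debug info in optimised release builds of the Rust extension; presumably this is so profiles and backtraces from the new native cache code stay readable. It does not change the optimisation level.
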
diff --git a/rust/src/lru_cache.rs b/rust/src/lru_cache.rs
index ac36e9162d..847f5d84be 100644
--- a/rust/src/lru_cache.rs
+++ b/rust/src/lru_cache.rs
@@ -48,7 +48,7 @@ struct LruCacheNodeInner {
     per_cache_list: Arc<Mutex<LinkedList<LruCacheNodeAdapterPerCache>>>,
     cache: Mutex<Option<PyObject>>,
     key: PyObject,
-    value: PyObject,
+    value: Arc<Mutex<PyObject>>,
     callbacks: Py<PySet>,
     memory: usize,
 }
@@ -73,7 +73,7 @@ impl LruCacheNode {
             per_cache_list: cache_list.0,
             cache: Mutex::new(Some(cache)),
             key,
-            value,
+            value: Arc::new(Mutex::new(value)),
             callbacks,
             memory,
         });
@@ -91,35 +91,34 @@ impl LruCacheNode {
         LruCacheNode(node)
     }
 
-    fn add_callbacks(&self, py: Python<'_>, callbacks: Py<PySet>) -> PyResult<()> {
-        let new_callbacks = callbacks.as_ref(py);
+    fn add_callbacks(&self, py: Python<'_>, new_callbacks: &PyAny) -> PyResult<()> {
+        if new_callbacks.len()? == 0 {
+            return Ok(());
+        }
+
         let current_callbacks = self.0.callbacks.as_ref(py);
 
-        for cb in new_callbacks {
-            current_callbacks.add(cb)?;
+        for cb in new_callbacks.iter()? {
+            current_callbacks.add(cb?)?;
         }
 
         Ok(())
     }
 
     fn run_and_clear_callbacks(&self, py: Python<'_>) {
-        let current_callbacks = self.0.callbacks.as_ref(py);
+        let callbacks = self.0.callbacks.as_ref(py);
 
-        if current_callbacks.len() == 0 {
+        if callbacks.is_empty() {
             return;
         }
 
-        // Swap out the stored callbacks with an empty list
-        let callbacks = std::mem::replace(&mut *callback_guard, Vec::new());
-
-        // Drop the lock
-        std::mem::drop(callback_guard);
-
         for callback in callbacks {
-            if let Err(err) = callback.call0(py) {
+            if let Err(err) = callback.call0() {
                 error!("LruCacheNode callback errored: {err}");
             }
         }
+
+        callbacks.clear();
     }
 
     fn drop_from_cache(&self) -> PyResult<()> {
@@ -195,8 +194,13 @@ impl LruCacheNode {
     }
 
     #[getter]
-    fn value(&self) -> &PyObject {
-        &self.0.value
+    fn value(&self) -> PyObject {
+        self.0.value.lock().expect("poisoned").clone()
+    }
+
+    #[setter]
+    fn set_value(&self, value: PyObject) {
+        *self.0.value.lock().expect("poisoned") = value
     }
 
     #[getter]
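
Moving `value` from a bare `PyObject` to `Arc<Mutex<PyObject>>` is what makes the new `#[setter]` possible: the node's inner state sits behind an `Arc` shared with the per-cache linked lists, so a setter that only has shared access needs interior mutability to replace the stored value. A minimal sketch of the same pattern in plain Rust, using a stand-in `Node` type rather than the real `LruCacheNode`:

// Sketch only: a stand-in `Node`, not the Synapse LruCacheNode.
use std::sync::{Arc, Mutex};

struct Node {
    // Mutex provides the interior mutability the setter needs; Arc mirrors the
    // diff, where the value handle can be cloned out independently of the node.
    value: Arc<Mutex<String>>,
}

impl Node {
    fn new(value: String) -> Self {
        Node {
            value: Arc::new(Mutex::new(value)),
        }
    }

    // Shape of the new `value` getter: clone the current value out under the lock.
    fn value(&self) -> String {
        self.value.lock().expect("poisoned").clone()
    }

    // Shape of the new `set_value` setter: replace the stored value through `&self`.
    fn set_value(&self, value: String) {
        *self.value.lock().expect("poisoned") = value;
    }
}

fn main() {
    let node = Node::new("old".to_owned());
    assert_eq!(node.value(), "old");
    node.set_value("new".to_owned());
    assert_eq!(node.value(), "new");
}
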
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index aa93109d13..895594adbe 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -44,6 +44,7 @@ from twisted.internet.interfaces import IReactorTime
 from synapse.config import cache as cache_config
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.metrics.jemalloc import get_jemalloc_stats
+from synapse.synapse_rust.lru_cache import LruCacheNode, PerCacheLinkedList
 from synapse.util import Clock, caches
 from synapse.util.caches import CacheMetric, EvictionReason, register_cache
 from synapse.util.caches.treecache import (
@@ -456,25 +457,21 @@ class LruCache(Generic[KT, VT]):
 
         list_root = ListNode[_Node[KT, VT]].create_root_node()
 
+        rust_linked_list = PerCacheLinkedList()
+
         lock = threading.Lock()
 
         def evict() -> None:
             while cache_len() > self.max_size:
                 # Get the last node in the list (i.e. the oldest node).
-                todelete = list_root.prev_node
+                todelete = rust_linked_list.get_back()
 
                 # The list root should always have a valid `prev_node` if the
                 # cache is not empty.
                 assert todelete is not None
 
-                # The node should always have a reference to a cache entry, as
-                # we only drop the cache entry when we remove the node from the
-                # list.
-                node = todelete.get_cache_entry()
-                assert node is not None
-
-                evicted_len = delete_node(node)
-                cache.pop(node.key, None)
+                evicted_len = delete_node(todelete)
+                cache.pop(todelete.key, None)
                 if metrics:
                     metrics.inc_evictions(EvictionReason.size, evicted_len)
 
@@ -502,14 +499,13 @@ class LruCache(Generic[KT, VT]):
         def add_node(
             key: KT, value: VT, callbacks: Collection[Callable[[], None]] = ()
         ) -> None:
-            node: _Node[KT, VT] = _Node(
-                list_root,
+            node: _Node[KT, VT] = LruCacheNode(
+                self,
+                rust_linked_list,
                 key,
                 value,
-                weak_ref_to_self,
-                real_clock,
-                callbacks,
-                prune_unread_entries,
+                set(callbacks),
+                0,
             )
             cache[key] = node
 
@@ -520,7 +516,7 @@ class LruCache(Generic[KT, VT]):
                 metrics.inc_memory_usage(node.memory)
 
         def move_node_to_front(node: _Node[KT, VT]) -> None:
-            node.move_to_front(real_clock, list_root)
+            node.move_to_front()
 
         def delete_node(node: _Node[KT, VT]) -> int:
             node.drop_from_lists()
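
With `PerCacheLinkedList`, eviction no longer walks `list_root.prev_node` and then dereferences a separate cache entry: `get_back()` hands back the node itself, which already carries its `key`, so the extra lookup and assert disappear. A rough plain-Rust sketch of that shape, with hypothetical types standing in for the real `PerCacheLinkedList` and cache:

use std::collections::{HashMap, VecDeque};

// Sketch only: the point is that the list returns a node that already knows
// its key, so eviction needs no separate "cache entry" lookup.
struct Cache {
    map: HashMap<String, u64>,
    // Front = most recently used, back = oldest, mirroring get_back().
    order: VecDeque<String>,
    max_size: usize,
}

impl Cache {
    fn evict(&mut self) {
        while self.map.len() > self.max_size {
            // The back of the list is the oldest node, and it carries its key.
            let oldest = self.order.pop_back().expect("list empty while over max_size");
            self.map.remove(&oldest);
        }
    }
}

fn main() {
    let mut cache = Cache {
        map: HashMap::from([("a".into(), 1), ("b".into(), 2), ("c".into(), 3)]),
        order: VecDeque::from(["c".to_owned(), "b".to_owned(), "a".to_owned()]),
        max_size: 2,
    };
    cache.evict();
    assert!(!cache.map.contains_key("a"));
    assert_eq!(cache.map.len(), 2);
}
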