author    | Erik Johnston <erik@matrix.org> | 2015-02-06 16:52:22 +0000
committer | Erik Johnston <erik@matrix.org> | 2015-02-06 16:52:22 +0000
commit    | 5bf318e9a60dd05120e9b78b49042337d38ec414 (patch)
tree      | 35b01464398a3ee71d53592245e802d8c2f7f95b /synapse/state.py
parent    | Bugfix cache layer (diff)
download  | synapse-5bf318e9a60dd05120e9b78b49042337d38ec414.tar.xz
Bug fixes.
Diffstat (limited to 'synapse/state.py')
-rw-r--r-- | synapse/state.py | 82
1 files changed, 55 insertions, 27 deletions
diff --git a/synapse/state.py b/synapse/state.py
index 7523573f22..49ac09863e 100644
--- a/synapse/state.py
+++ b/synapse/state.py
@@ -43,6 +43,10 @@ AuthEventTypes = (
 )
 
 
+SIZE_OF_CACHE = 1000
+EVICTION_TIMEOUT_SECONDS = 20
+
+
 class _StateCacheEntry(object):
     def __init__(self, state, state_group, ts):
         self.state = state
@@ -59,13 +63,22 @@ class StateHandler(object):
         self.store = hs.get_datastore()
         self.hs = hs
 
-        # set of event_ids -> _StateCacheEntry.
+        # dict of set of event_ids -> _StateCacheEntry.
+        self._state_cache = None
+
+    def start_caching(self):
+        logger.debug("start_caching")
+
         self._state_cache = {}
 
         def f():
-            self._prune_cache()
+            logger.debug("Pruning")
+            try:
+                self._prune_cache()
+            except:
+                logger.exception("Prune")
 
-        self.clock.looping_call(f, 10*1000)
+        self.clock.looping_call(f, 5*1000)
 
     @defer.inlineCallbacks
     def get_current_state(self, room_id, event_type=None, state_key=""):
@@ -86,7 +99,10 @@ class StateHandler(object):
             for e_id, _, _ in events
         ]
 
-        cache = self._state_cache.get(frozenset(event_ids), None)
+        cache = None
+        if self._state_cache is not None:
+            cache = self._state_cache.get(frozenset(event_ids), None)
+
         if cache:
             cache.ts = self.clock.time_msec()
             state = cache.state
@@ -199,16 +215,19 @@ class StateHandler(object):
         """
         logger.debug("resolve_state_groups event_ids %s", event_ids)
 
-        cache = self._state_cache.get(frozenset(event_ids), None)
-        if cache and cache.state_group:
-            cache.ts = self.clock.time_msec()
-            prev_state = cache.state.get((event_type, state_key), None)
-            if prev_state:
-                prev_state = prev_state.event_id
-                prev_states = [prev_state]
-            else:
-                prev_states = []
-            defer.returnValue((cache.state_group, cache.state, prev_states))
+        if self._state_cache is not None:
+            cache = self._state_cache.get(frozenset(event_ids), None)
+            if cache and cache.state_group:
+                cache.ts = self.clock.time_msec()
+                prev_state = cache.state.get((event_type, state_key), None)
+                if prev_state:
+                    prev_state = prev_state.event_id
+                    prev_states = [prev_state]
+                else:
+                    prev_states = []
+                defer.returnValue(
+                    (cache.state_group, cache.state, prev_states)
+                )
 
         state_groups = yield self.store.get_state_groups(
             event_ids
@@ -233,15 +252,16 @@ class StateHandler(object):
             else:
                 prev_states = []
 
-            cache = _StateCacheEntry(
-                state=state,
-                state_group=name,
-                ts=self.clock.time_msec()
-            )
+            if self._state_cache is not None:
+                cache = _StateCacheEntry(
+                    state=state,
+                    state_group=name,
+                    ts=self.clock.time_msec()
+                )
 
-            self._state_cache[frozenset(event_ids)] = cache
+                self._state_cache[frozenset(event_ids)] = cache
 
-            defer.returnValue((name, state, prev_states))
+            defer.returnValue((name, state, prev_states))
 
         state = {}
         for group, g_state in state_groups.items():
@@ -292,7 +312,8 @@ class StateHandler(object):
             ts=self.clock.time_msec()
         )
 
-        self._state_cache[frozenset(event_ids)] = cache
+        if self._state_cache is not None:
+            self._state_cache[frozenset(event_ids)] = cache
 
         defer.returnValue((None, new_state, prev_states))
 
@@ -379,26 +400,33 @@ class StateHandler(object):
         return sorted(events, key=key_func)
 
     def _prune_cache(self):
-        logger.debug("_prune_cache. before len: ", len(self._state_cache))
+        logger.debug("_prune_cache")
+        logger.debug(
+            "_prune_cache. before len: %d",
+            len(self._state_cache.keys())
+        )
         now = self.clock.time_msec()
 
-        if len(self._state_cache) > 100:
+        if len(self._state_cache.keys()) > SIZE_OF_CACHE:
             sorted_entries = sorted(
                 self._state_cache.items(),
                 key=lambda k, v: v.ts,
             )
 
-            for k, _ in sorted_entries[100:]:
+            for k, _ in sorted_entries[SIZE_OF_CACHE:]:
                 self._state_cache.pop(k)
 
         keys_to_delete = set()
         for key, cache_entry in self._state_cache.items():
-            if now - cache_entry.ts > 60*1000:
+            if now - cache_entry.ts > EVICTION_TIMEOUT_SECONDS*1000:
                 keys_to_delete.add(key)
 
         for k in keys_to_delete:
             self._state_cache.pop(k)
 
-        logger.debug("_prune_cache. after len: ", len(self._state_cache))
+        logger.debug(
+            "_prune_cache. after len: %d",
+            len(self._state_cache.keys())
+        )
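Taken together, the patch makes the state cache opt-in (it stays None until start_caching() is called, and every read and write is guarded by "if self._state_cache is not None") and prunes it on a 5-second looping_call using two rules: a hard size cap (SIZE_OF_CACHE) and a per-entry idle timeout (EVICTION_TIMEOUT_SECONDS). The sketch below is a minimal standalone illustration of that pruning policy, not Synapse's API: the class and parameter names are invented for the example, it keeps the most recently touched entries when the cap is exceeded (a conventional LRU choice), and the sort key takes a single argument because dict.items() yields (key, entry) tuples.

import time


class _CacheEntry(object):
    """A cached value plus the last time it was touched, in milliseconds."""

    def __init__(self, value, ts):
        self.value = value
        self.ts = ts


class TimestampedCache(object):
    """Illustrative dict cache with the two eviction rules used in the
    patch: a size cap and a per-entry idle timeout (hypothetical names)."""

    def __init__(self, max_size=1000, timeout_seconds=20, clock_ms=None):
        self._entries = {}
        self._max_size = max_size
        self._timeout_ms = timeout_seconds * 1000
        # Allow a fake millisecond clock to be injected for tests.
        self._clock_ms = clock_ms or (lambda: int(time.time() * 1000))

    def get(self, key):
        entry = self._entries.get(key)
        if entry is None:
            return None
        # Refresh the timestamp on a hit, as the patch does with cache.ts.
        entry.ts = self._clock_ms()
        return entry.value

    def put(self, key, value):
        self._entries[key] = _CacheEntry(value, self._clock_ms())

    def prune(self):
        now = self._clock_ms()

        # Rule 1: enforce the size cap, dropping the least recently
        # touched entries first.
        if len(self._entries) > self._max_size:
            by_recency = sorted(
                self._entries.items(),
                key=lambda item: item[1].ts,
                reverse=True,
            )
            for key, _ in by_recency[self._max_size:]:
                self._entries.pop(key)

        # Rule 2: drop anything that has sat untouched for longer than
        # the idle timeout.
        stale = [
            key for key, entry in self._entries.items()
            if now - entry.ts > self._timeout_ms
        ]
        for key in stale:
            self._entries.pop(key)

In the patch the prune step is scheduled with self.clock.looping_call(f, 5*1000); with a standalone sketch like this, prune() would be called on whatever timer the surrounding code already provides.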