| field | value | date |
|---|---|---|
| author | Erik Johnston <erikj@jki.re> | 2017-01-10 15:42:00 +0000 |
| committer | GitHub <noreply@github.com> | 2017-01-10 15:42:00 +0000 |
| commit | d524bc9110040855402f3d8d1510246872dde62c (patch) | |
| tree | 7e8b15ed798e4cf0089b0968d23937c86a8bd9c8 | |
| parent | Merge pull request #1790 from matrix-org/erikj/linearizer (diff) | |
| parent | Limit number of entries to prefill from cache (diff) | |
| download | synapse-d524bc9110040855402f3d8d1510246872dde62c.tar.xz | |
Merge pull request #1792 from matrix-org/erikj/limit_cache_prefill_device
Limit number of entries to prefill from cache
 synapse/storage/__init__.py | 4 +++-
 synapse/storage/_base.py    | 5 +++--
 2 files changed, 6 insertions(+), 3 deletions(-)
```diff
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index fe936b3e62..e8495f1eb9 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -189,7 +189,8 @@ class DataStore(RoomMemberStore, RoomStore,
             db_conn, "device_inbox",
             entity_column="user_id",
             stream_column="stream_id",
-            max_value=max_device_inbox_id
+            max_value=max_device_inbox_id,
+            limit=1000,
         )
         self._device_inbox_stream_cache = StreamChangeCache(
             "DeviceInboxStreamChangeCache", min_device_inbox_id,
@@ -202,6 +203,7 @@ class DataStore(RoomMemberStore, RoomStore,
             entity_column="destination",
             stream_column="stream_id",
             max_value=max_device_inbox_id,
+            limit=1000,
         )
         self._device_federation_outbox_stream_cache = StreamChangeCache(
             "DeviceFederationOutboxStreamChangeCache", min_device_outbox_id,
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index b62c459d8b..5620a655eb 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -838,18 +838,19 @@ class SQLBaseStore(object):
         return txn.execute(sql, keyvalues.values())
 
     def _get_cache_dict(self, db_conn, table, entity_column, stream_column,
-                        max_value):
+                        max_value, limit=100000):
         # Fetch a mapping of room_id -> max stream position for "recent" rooms.
         # It doesn't really matter how many we get, the StreamChangeCache will
         # do the right thing to ensure it respects the max size of cache.
         sql = (
             "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s"
-            " WHERE %(stream)s > ? - 100000"
+            " WHERE %(stream)s > ? - %(limit)s"
             " GROUP BY %(entity)s"
         ) % {
             "table": table,
             "entity": entity_column,
             "stream": stream_column,
+            "limit": limit,
         }
 
         sql = self.database_engine.convert_param_style(sql)
```
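For context, here is a minimal, self-contained sketch of the pattern this patch parameterises: prefill an `{entity: max_stream_id}` mapping from only the last `limit` stream positions, so startup does not scan an unbounded window of a busy table such as `device_inbox`. This is an illustration, not Synapse's actual module: it uses `sqlite3` so it runs standalone, and the helper name `get_cache_dict` and the `min_val` handling are assumptions modelled on the patched `SQLBaseStore._get_cache_dict`.

```python
# Illustrative sketch only: mirrors the shape of _get_cache_dict after this
# patch, using sqlite3 so it runs standalone.
import sqlite3


def get_cache_dict(conn, table, entity_column, stream_column, max_value,
                   limit=100000):
    # Only consider rows within the last `limit` stream positions; the patch
    # lets callers shrink this window (e.g. to 1000 for the device inbox
    # caches) instead of always looking 100000 positions back.
    sql = (
        "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s"
        " WHERE %(stream)s > ? - %(limit)s"
        " GROUP BY %(entity)s"
    ) % {
        "table": table,
        "entity": entity_column,
        "stream": stream_column,
        "limit": limit,
    }

    txn = conn.cursor()
    txn.execute(sql, (max_value,))
    cache = {row[0]: int(row[1]) for row in txn}
    txn.close()

    # Earliest position the prefilled mapping can vouch for; entities whose
    # last change is at or before this must be treated as "maybe changed"
    # by whatever consumes the cache (a StreamChangeCache in Synapse).
    min_val = min(cache.values()) if cache else max_value
    return cache, min_val


# Tiny demonstration with a throwaway table:
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE device_inbox (user_id TEXT, stream_id INTEGER)")
conn.executemany(
    "INSERT INTO device_inbox VALUES (?, ?)",
    [("@alice:hs", 950), ("@alice:hs", 1), ("@bob:hs", 20)],
)

cache, min_val = get_cache_dict(
    conn, "device_inbox",
    entity_column="user_id", stream_column="stream_id",
    max_value=1000, limit=1000,
)
print(cache, min_val)  # {'@alice:hs': 950, '@bob:hs': 20} and 20

# With limit=100, only rows with stream_id > 900 survive the window:
cache, min_val = get_cache_dict(
    conn, "device_inbox",
    entity_column="user_id", stream_column="stream_id",
    max_value=1000, limit=100,
)
print(cache, min_val)  # {'@alice:hs': 950} and 950
```

The trade-off the `limit` knob exposes: a smaller window means a cheaper startup query but a higher `min_val`, so more entities fall below the cache's horizon and must be re-checked against the database on first access.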