Diffstat (limited to 'synapse/storage')
 -rw-r--r--  synapse/storage/client_ips.py          2
 -rw-r--r--  synapse/storage/event_push_actions.py  8
 -rw-r--r--  synapse/storage/events.py              9
 -rw-r--r--  synapse/storage/roommember.py          2
 -rw-r--r--  synapse/storage/search.py              2
 -rw-r--r--  synapse/storage/state.py               5
6 files changed, 18 insertions, 10 deletions
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index a03d1d6104..fba3ecc09c 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
 # Number of msec of granularity to store the user IP 'last seen' time. Smaller
 # times give more inserts into the database even for readonly API hits
 # 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
+LAST_SEEN_GRANULARITY = 10 * 60 * 1000
 
 
 class ClientIpStore(background_updates.BackgroundUpdateStore):
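
The granularity window grows from 2 minutes to 10, cutting "last seen" writes roughly fivefold for busy clients. A minimal sketch of how such a window throttles writes, assuming a hypothetical should_update_last_seen helper (this is not Synapse's actual code):

LAST_SEEN_GRANULARITY = 10 * 60 * 1000  # 10 minutes, in milliseconds

def should_update_last_seen(last_seen_ms, now_ms,
                            granularity_ms=LAST_SEEN_GRANULARITY):
    """Return True if the stored 'last seen' time is stale enough to rewrite."""
    return last_seen_ms is None or (now_ms - last_seen_ms) > granularity_ms

# A client hitting /sync every few seconds now triggers at most one
# write per (user, device, IP) every 10 minutes, versus every 2 before.
assert should_update_last_seen(None, 0)
assert not should_update_last_seen(0, 5 * 60 * 1000)
assert should_update_last_seen(0, 11 * 60 * 1000)
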
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 8efe2fd4bb..00a79fc5ac 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -87,6 +87,8 @@ class EventPushActionsStore(SQLBaseStore):
         self._rotate_notif_loop = self._clock.looping_call(
             self._rotate_notifs, 30 * 60 * 1000
         )
+        self._rotate_delay = 3
+        self._rotate_count = 10000
 
     def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples):
         """
@@ -629,7 +631,7 @@ class EventPushActionsStore(SQLBaseStore):
                 )
                 if caught_up:
                     break
-                yield sleep(5)
+                yield sleep(self._rotate_delay)
         finally:
             self._doing_notif_rotation = False
 
@@ -650,8 +652,8 @@
         txn.execute("""
            SELECT stream_ordering FROM event_push_actions
            WHERE stream_ordering > ?
-           ORDER BY stream_ordering ASC LIMIT 1 OFFSET 50000
-        """, (old_rotate_stream_ordering,))
+           ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
+        """, (old_rotate_stream_ordering, self._rotate_count))
         stream_row = txn.fetchone()
         if stream_row:
             offset_stream_ordering, = stream_row
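
The hard-coded batch offset (50000) and sleep (5s) become instance attributes, so the rotation rate can be tuned without editing the SQL. A rough sketch of the loop shape this enables, using time.sleep in place of Twisted's deferred sleep; RotatorSketch and rotate_batch are illustrative stand-ins, not Synapse's names:

import time

class RotatorSketch:
    def __init__(self, rotate_delay=3, rotate_count=10000):
        self._rotate_delay = rotate_delay  # seconds to pause between batches
        self._rotate_count = rotate_count  # rows to process per batch

    def rotate_all(self, rotate_batch):
        while True:
            # rotate_batch is expected to return True once caught up.
            if rotate_batch(limit=self._rotate_count):
                break
            time.sleep(self._rotate_delay)

# Example: a job that catches up on its third batch.
progress = iter([False, False, True])
RotatorSketch(rotate_delay=0).rotate_all(lambda limit: next(progress))
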
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 28cce2979c..16c97c4b09 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1043,7 +1043,6 @@ class EventsStore(SQLBaseStore):
             "event_edge_hashes",
             "event_edges",
             "event_forward_extremities",
-            "event_push_actions",
             "event_reference_hashes",
             "event_search",
             "event_signatures",
@@ -1063,6 +1062,14 @@ class EventsStore(SQLBaseStore):
             [(ev.event_id,) for ev, _ in events_and_contexts]
         )
 
+        for table in (
+            "event_push_actions",
+        ):
+            txn.executemany(
+                "DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
+                [(ev.room_id, ev.event_id) for ev, _ in events_and_contexts]
+            )
+
     def _store_event_txn(self, txn, events_and_contexts):
         """Insert new events into the event and event_json tables
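
event_push_actions rows are now deleted in their own pass that filters on room_id as well as event_id, presumably so the delete can hit an index keyed on room. Because the statement binds two placeholders, each parameter tuple must supply both values. A self-contained illustration of that shape, with sqlite3 standing in for Synapse's transaction wrapper and made-up IDs:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE event_push_actions (room_id TEXT, event_id TEXT)")
conn.executemany(
    "INSERT INTO event_push_actions VALUES (?, ?)",
    [("!room:hs", "$ev1"), ("!room:hs", "$ev2")],
)

# One (room_id, event_id) pair per event, matching the two placeholders.
conn.executemany(
    "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
    [("!room:hs", "$ev1"), ("!room:hs", "$ev2")],
)
assert conn.execute("SELECT count(*) FROM event_push_actions").fetchone()[0] == 0
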
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 3e77fd3901..75ac06da85 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -675,7 +675,7 @@ class RoomMemberStore(SQLBaseStore):
 
         defer.returnValue(result)
 
-    @cached(max_entries=10000, iterable=True)
+    @cached(max_entries=10000)
     def _get_joined_hosts_cache(self, room_id):
         return _JoinedHostsCache(self, room_id)
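
Dropping iterable=True means each cached _JoinedHostsCache is charged as a single entry against max_entries, instead of being charged by the length of its contents. A toy model of that accounting difference (it mimics, rather than reuses, Synapse's @cached):

def cache_cost(value, iterable):
    # iterable=True charges len(value) toward max_entries; otherwise 1.
    return len(value) if iterable else 1

hosts = ["hs1.example.org", "hs2.example.org", "hs3.example.org"]
assert cache_cost(hosts, iterable=True) == 3   # old accounting
assert cache_cost(hosts, iterable=False) == 1  # after this change
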
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 2755acff40..52ad196e9e 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -719,7 +719,7 @@ def _parse_query(database_engine, search_term):
     results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
 
     if isinstance(database_engine, PostgresEngine):
-        return " & ".join(result + ":*" for result in results)
+        return " & ".join(result for result in results)
     elif isinstance(database_engine, Sqlite3Engine):
         return " & ".join(result + "*" for result in results)
     else:
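
On PostgreSQL each search token loses its ':*' suffix, turning the to_tsquery prefix match into an exact lexeme match, which is cheaper to evaluate; the SQLite FTS branch keeps its '*' prefix operator. An illustrative standalone reimplementation of the two branches (plain functions, not Synapse's _parse_query):

import re

def parse_query_postgres(search_term):
    results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
    return " & ".join(results)  # exact lexeme match; ':*' removed

def parse_query_sqlite(search_term):
    results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
    return " & ".join(r + "*" for r in results)  # FTS prefix match kept

assert parse_query_postgres("hello wor") == "hello & wor"  # was "hello:* & wor:*"
assert parse_query_sqlite("hello wor") == "hello* & wor*"
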
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 2b325e1c1f..43e75bf0a0 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -54,7 +54,7 @@ class StateGroupWorkerStore(SQLBaseStore):
         super(StateGroupWorkerStore, self).__init__(db_conn, hs)
 
         self._state_group_cache = DictionaryCache(
-            "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR
+            "*stateGroupCache*", 500000 * CACHE_SIZE_FACTOR
         )
 
     @cached(max_entries=100000, iterable=True)
@@ -546,8 +546,7 @@ class StateGroupWorkerStore(SQLBaseStore):
                 state_dict = results[group]
 
                 state_dict.update(
-                    ((intern_string(k[0]), intern_string(k[1])), to_ascii(v))
-                    for k, v in group_state_dict.iteritems()
+                    group_state_dict.iteritems()
                 )
 
                 self._state_group_cache.update(
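
Two related hot-path changes: the state group cache grows fivefold, and the per-key intern_string/to_ascii pass is dropped when filling it, trading memory (no string deduplication) for less CPU per state lookup. A rough sketch of what the dropped line did, with sys.intern standing in for Synapse's intern_string helper:

import sys

def interned_items(group_state_dict):
    # Intern the (event type, state key) strings so equal keys across
    # many state groups share one object; costs a lookup per key.
    return (
        ((sys.intern(k[0]), sys.intern(k[1])), v)
        for k, v in group_state_dict.items()
    )

state = {("m.room.member", "@alice:example.org"): "$membership_event_id"}
assert dict(interned_items(state)) == state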