author     Patrick Cloke <patrickc@matrix.org>   2022-06-30 13:27:24 -0400
committer  Patrick Cloke <patrickc@matrix.org>   2022-06-30 13:27:24 -0400
commit     b0366853cae1e08d30d44757d142f8670b7ec35c (patch)
tree       fc219b70153c400282180f5071c7daccc5b841c4 /synapse/storage/databases/main
parent     Don't process /send requests for users who have hit their ratelimit (#13134) (diff)
parent     Fix unread counts on large servers (#13140) (diff)
download   synapse-b0366853cae1e08d30d44757d142f8670b7ec35c.tar.xz
Merge remote-tracking branch 'origin/release-v1.62' into develop
Diffstat (limited to 'synapse/storage/databases/main')
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py | 58
1 file changed, 36 insertions, 22 deletions
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index eae41d7484..f432d578b5 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -864,18 +864,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
         limit = 100
 
-        min_stream_id = self.db_pool.simple_select_one_onecol_txn(
+        min_receipts_stream_id = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="event_push_summary_last_receipt_stream_id",
             keyvalues={},
             retcol="stream_id",
         )
 
+        max_receipts_stream_id = self._receipts_id_gen.get_current_token()
+
         sql = """
             SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering
             FROM receipts_linearized AS r
             INNER JOIN events AS e USING (event_id)
-            WHERE r.stream_id > ? AND user_id LIKE ?
+            WHERE ? < r.stream_id AND r.stream_id <= ? AND user_id LIKE ?
             ORDER BY r.stream_id ASC
             LIMIT ?
         """
 
@@ -887,13 +889,21 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
         txn.execute(
             sql,
             (
-                min_stream_id,
+                min_receipts_stream_id,
+                max_receipts_stream_id,
                 user_filter,
                 limit,
             ),
         )
         rows = txn.fetchall()
 
+        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="event_push_summary_stream_ordering",
+            keyvalues={},
+            retcol="stream_ordering",
+        )
+
         # For each new read receipt we delete push actions from before it and
         # recalculate the summary.
         for _, room_id, user_id, stream_ordering in rows:
@@ -912,13 +922,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                 (room_id, user_id, stream_ordering),
             )
 
-            old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
-                txn,
-                table="event_push_summary_stream_ordering",
-                keyvalues={},
-                retcol="stream_ordering",
-            )
-
             notif_count, unread_count = self._get_notif_unread_count_for_user_room(
                 txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
             )
@@ -937,18 +940,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
 
         # We always update `event_push_summary_last_receipt_stream_id` to
         # ensure that we don't rescan the same receipts for remote users.
-        #
-        # This requires repeatable read to be safe, as we need the
-        # `MAX(stream_id)` to not include any new rows that have been committed
-        # since the start of the transaction (since those rows won't have been
-        # returned by the query above). Alternatively we could query the max
-        # stream ID at the start of the transaction and bound everything by
-        # that.
-        txn.execute(
-            """
-            UPDATE event_push_summary_last_receipt_stream_id
-            SET stream_id = (SELECT COALESCE(MAX(stream_id), 0) FROM receipts_linearized)
-            """
+
+        upper_limit = max_receipts_stream_id
+        if len(rows) >= limit:
+            # If we pulled out a limited number of rows we only update the
+            # position to the last receipt we processed, so we continue
+            # processing the rest next iteration.
+            upper_limit = rows[-1][0]
+
+        self.db_pool.simple_update_txn(
+            txn,
+            table="event_push_summary_last_receipt_stream_id",
+            keyvalues={},
+            updatevalues={"stream_id": upper_limit},
         )
 
         return len(rows) < limit
@@ -1199,6 +1203,16 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             where_clause="highlight=1",
         )
 
+        # Add index to make deleting old push actions faster.
+        self.db_pool.updates.register_background_index_update(
+            "event_push_actions_stream_highlight_index",
+            index_name="event_push_actions_stream_highlight_index",
+            table="event_push_actions",
+            columns=["highlight", "stream_ordering"],
+            where_clause="highlight=0",
+            psql_only=True,
+        )
+
     async def get_push_actions_for_user(
         self,
         user_id: str,
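The receipts change above (from the "Fix unread counts on large servers (#13140)" parent) swaps an unbounded scan, whose final marker update was only safe under repeatable-read isolation, for bounded, resumable pagination: snapshot an upper bound before scanning, process at most `limit` rows inside that window, and persist only as much progress as was actually made. The following is a minimal, self-contained sketch of that pattern, not Synapse code; the `receipts`/`marker` tables and `process_batch` are invented stand-ins for `receipts_linearized` and `event_push_summary_last_receipt_stream_id`.

import sqlite3

LIMIT = 100  # matches the batch size in the diff ("limit = 100")


def process_batch(conn: sqlite3.Connection) -> bool:
    """Process one window of receipts; returns True once caught up."""
    # Lower bound: the position persisted by the previous pass.
    lo = conn.execute("SELECT stream_id FROM marker").fetchone()[0]
    # Upper bound, snapshotted before scanning, so rows committed while we
    # work are simply deferred to the next pass. (The diff gets this from
    # self._receipts_id_gen.get_current_token() rather than MAX().)
    hi = conn.execute(
        "SELECT COALESCE(MAX(stream_id), 0) FROM receipts"
    ).fetchone()[0]

    rows = conn.execute(
        "SELECT stream_id FROM receipts"
        " WHERE ? < stream_id AND stream_id <= ?"
        " ORDER BY stream_id ASC LIMIT ?",
        (lo, hi, LIMIT),
    ).fetchall()

    for (_stream_id,) in rows:
        pass  # ...recalculate the per-room unread summary here...

    # A full batch means the window may hold more rows: advance the marker
    # only to the last processed receipt so the remainder is picked up on
    # the next iteration. Otherwise jump straight to the snapshot.
    upper_limit = rows[-1][0] if len(rows) >= LIMIT else hi
    conn.execute("UPDATE marker SET stream_id = ?", (upper_limit,))
    conn.commit()
    return len(rows) < LIMIT


if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.executescript(
        "CREATE TABLE receipts (stream_id INTEGER PRIMARY KEY);"
        "CREATE TABLE marker (stream_id INTEGER);"
        "INSERT INTO marker VALUES (0);"
    )
    conn.executemany(
        "INSERT INTO receipts VALUES (?)", [(i,) for i in range(1, 251)]
    )
    passes = 0
    while True:
        passes += 1
        if process_batch(conn):
            break
    print(f"caught up after {passes} passes")  # 3 passes for 250 rows

The driver loop mirrors the diff's `return len(rows) < limit` convention: callers keep invoking the function until it reports it has caught up with the snapshot.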
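The final hunk registers a background update for a partial index over non-highlight push actions, built only on PostgreSQL (`psql_only=True`). As a rough illustration of what that update amounts to on Postgres; this is an assumption on my part that the background-update machinery builds such indexes with CREATE INDEX CONCURRENTLY, and the `dsn` parameter and function name here are hypothetical:

import psycopg2  # the PostgreSQL driver Synapse uses


def build_stream_highlight_index(dsn: str) -> None:
    """Hypothetical helper: build the partial index the hunk registers."""
    conn = psycopg2.connect(dsn)
    # CREATE INDEX CONCURRENTLY cannot run inside a transaction block,
    # so switch the connection to autocommit first.
    conn.autocommit = True
    with conn.cursor() as cur:
        cur.execute(
            """
            CREATE INDEX CONCURRENTLY IF NOT EXISTS
                event_push_actions_stream_highlight_index
            ON event_push_actions (highlight, stream_ordering)
            WHERE highlight = 0
            """
        )
    conn.close()

Making the index partial (`WHERE highlight = 0`) keeps it small while still covering the rows the deletion job actually touches, complementing the existing `highlight=1` index registered just above it in the same method.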