author    Erik Johnston <erik@matrix.org>  2022-06-30 15:08:40 +0100
committer GitHub <noreply@github.com>  2022-06-30 15:08:40 +0100
commit    dbce28b2f14e5a4d0db9f090caf390ea998d1e59 (patch)
tree      08788ef44a44ef2a8d57b383dfce9c312879fc88 /synapse/storage/databases/main
parent    Add index to help delete old push actions (#13141) (diff)
download  synapse-dbce28b2f14e5a4d0db9f090caf390ea998d1e59.tar.xz
Fix unread counts on large servers (#13140)
Diffstat (limited to 'synapse/storage/databases/main')
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py  48
1 file changed, 26 insertions, 22 deletions
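In outline: the receipts query gains an upper bound taken from the receipts ID generator at the start of the transaction, the invariant `event_push_summary_stream_ordering` lookup moves out of the per-receipt loop, and `event_push_summary_last_receipt_stream_id` is advanced only as far as the receipts actually processed rather than to an unbounded `MAX(stream_id)`. As the removed comment further down notes, the old `MAX(stream_id)` update was only safe under REPEATABLE READ isolation; bounding everything by a token read up front removes that requirement.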
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 3fadd0930b..7d4754b3d3 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -854,18 +854,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
 
         limit = 100
 
-        min_stream_id = self.db_pool.simple_select_one_onecol_txn(
+        min_receipts_stream_id = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="event_push_summary_last_receipt_stream_id",
             keyvalues={},
             retcol="stream_id",
         )
 
+        max_receipts_stream_id = self._receipts_id_gen.get_current_token()
+
         sql = """
             SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering
             FROM receipts_linearized AS r
             INNER JOIN events AS e USING (event_id)
-            WHERE r.stream_id > ? AND user_id LIKE ?
+            WHERE ? < r.stream_id AND r.stream_id <= ? AND user_id LIKE ?
             ORDER BY r.stream_id ASC
             LIMIT ?
         """
@@ -877,13 +879,21 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
         txn.execute(
             sql,
             (
-                min_stream_id,
+                min_receipts_stream_id,
+                max_receipts_stream_id,
                 user_filter,
                 limit,
             ),
         )
         rows = txn.fetchall()
 
+        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="event_push_summary_stream_ordering",
+            keyvalues={},
+            retcol="stream_ordering",
+        )
+
         # For each new read receipt we delete push actions from before it and
         # recalculate the summary.
         for _, room_id, user_id, stream_ordering in rows:
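The next hunk is the other half of a hoist: `event_push_summary_stream_ordering` was previously re-read inside this loop, once per receipt row (up to `limit` identical queries per batch), and the block added above reads it once instead. The shape of the change, with hypothetical helper names standing in for the real calls:

    # Before: the lookup ran on every iteration; after: once per batch,
    # since the value cannot change within the transaction.
    old_rotate_stream_ordering = read_summary_stream_ordering(txn)  # hypothetical
    for _, room_id, user_id, stream_ordering in rows:
        recalculate_summary(  # hypothetical; stands in for the loop body
            txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
        )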
@@ -902,13 +912,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
                 (room_id, user_id, stream_ordering),
             )
 
-            old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
-                txn,
-                table="event_push_summary_stream_ordering",
-                keyvalues={},
-                retcol="stream_ordering",
-            )
-
             notif_count, unread_count = self._get_notif_unread_count_for_user_room(
                 txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
             )
@@ -927,18 +930,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
 
         # We always update `event_push_summary_last_receipt_stream_id` to
         # ensure that we don't rescan the same receipts for remote users.
-        #
-        # This requires repeatable read to be safe, as we need the
-        # `MAX(stream_id)` to not include any new rows that have been committed
-        # since the start of the transaction (since those rows won't have been
-        # returned by the query above). Alternatively we could query the max
-        # stream ID at the start of the transaction and bound everything by
-        # that.
-        txn.execute(
-            """
-            UPDATE event_push_summary_last_receipt_stream_id
-            SET stream_id = (SELECT COALESCE(MAX(stream_id), 0) FROM receipts_linearized)
-            """
+
+        upper_limit = max_receipts_stream_id
+        if len(rows) >= limit:
+            # If we pulled out a limited number of rows we only update the
+            # position to the last receipt we processed, so we continue
+            # processing the rest next iteration.
+            upper_limit = rows[-1][0]
+
+        self.db_pool.simple_update_txn(
+            txn,
+            table="event_push_summary_last_receipt_stream_id",
+            keyvalues={},
+            updatevalues={"stream_id": upper_limit},
         )
 
         return len(rows) < limit
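Taken together, the function now acts as a paginated catch-up job: process at most `limit` receipts, advance the cursor, and report whether the backlog is drained. A sketch of that contract under stated assumptions (`get_last_receipt_stream_id`, `get_current_receipt_token`, `fetch_receipt_batch`, and `set_last_receipt_stream_id` are illustrative stand-ins, not Synapse's helpers):

    LIMIT = 100

    def handle_new_receipts_txn(txn, user_filter) -> bool:
        # user_filter: a SQL LIKE pattern, as in the query above.
        min_id = get_last_receipt_stream_id(txn)   # cursor from the last run
        max_id = get_current_receipt_token()       # snapshot taken up front
        rows = fetch_receipt_batch(txn, min_id, max_id, user_filter, LIMIT)

        # ... delete superseded push actions and recalculate the summary
        # for each (room_id, user_id) as in the loop above ...

        upper_limit = max_id
        if len(rows) >= LIMIT:
            # A full batch may leave receipts below the snapshot unprocessed,
            # so only advance the cursor to the last row actually handled.
            upper_limit = rows[-1][0]
        set_last_receipt_stream_id(txn, upper_limit)

        # True once everything up to the snapshot has been handled; a caller
        # can loop on this return value until it has caught up.
        return len(rows) < LIMIT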