author:    Erik Johnston <erik@matrix.org>    2022-07-06 12:09:19 +0100
committer: GitHub <noreply@github.com>        2022-07-06 12:09:19 +0100
commit:    a0f51b059c2aa1bbe0a2d6991c369cba5cf43c0a (patch)
tree:      ebf0f65f0e0b1bafd830a63bc0003d4ad8392e2a
parent:    Handle race between persisting an event and un-partial stating a room (#13100) (diff)
Fix bug where we failed to delete old push actions (#13194)
This happened if we encountered a stream ordering in `event_push_actions` that had more rows than the batch size of the delete: if we don't delete any rows in an iteration, then the next time round we get the exact same stream ordering and get stuck.
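
A minimal sketch of the failure mode, using an in-memory SQLite table rather than Synapse's real schema or storage helpers (the table layout, row counts, and the delete_batch helper below are illustrative assumptions): when a single stream ordering has more rows than the batch size, the exclusive "<" bound deletes nothing on each pass, while the inclusive "<=" bound makes progress.

    # Illustrative only: a simplified table standing in for event_push_actions.
    import sqlite3

    conn = sqlite3.connect(":memory:")
    txn = conn.cursor()
    txn.execute("CREATE TABLE event_push_actions (stream_ordering INT, highlight INT)")
    # 150 non-highlight rows, all at the same stream ordering -- more than batch_size.
    txn.executemany("INSERT INTO event_push_actions VALUES (?, 0)", [(42,)] * 150)

    batch_size = 100
    max_stream_ordering_to_delete = 1000

    def delete_batch(op):
        # Find the stream ordering `batch_size` rows in, then delete up to it,
        # mirroring the structure of the two queries changed in the diff below.
        txn.execute(
            f"SELECT stream_ordering FROM event_push_actions"
            f" WHERE stream_ordering {op} ? AND highlight = 0"
            f" ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?",
            (max_stream_ordering_to_delete, batch_size),
        )
        row = txn.fetchone()
        stream_ordering = row[0] if row else max_stream_ordering_to_delete
        txn.execute(
            f"DELETE FROM event_push_actions WHERE stream_ordering {op} ? AND highlight = 0",
            (stream_ordering,),
        )
        return txn.rowcount

    print(delete_batch("<"))   # 0: the old bound never gets past this stream ordering
    print(delete_batch("<="))  # 150: the inclusive bound clears it and makes progress
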
-rw-r--r--  changelog.d/13194.bugfix                               1
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py   6
2 files changed, 5 insertions, 2 deletions
diff --git a/changelog.d/13194.bugfix b/changelog.d/13194.bugfix
new file mode 100644
index 0000000000..2c2e8bb21b
--- /dev/null
+++ b/changelog.d/13194.bugfix
@@ -0,0 +1 @@
+Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0.
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 32536430aa..a3edcbb398 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -1114,7 +1114,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             txn.execute(
                 """
                 SELECT stream_ordering FROM event_push_actions
-                WHERE stream_ordering < ? AND highlight = 0
+                WHERE stream_ordering <= ? AND highlight = 0
                 ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
             """,
                 (
@@ -1129,10 +1129,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             else:
                 stream_ordering = max_stream_ordering_to_delete
 
+            # We need to use an inclusive bound here to handle the case where a
+            # single stream ordering has more than `batch_size` rows.
             txn.execute(
                 """
                 DELETE FROM event_push_actions
-                WHERE stream_ordering < ? AND highlight = 0
+                WHERE stream_ordering <= ? AND highlight = 0
                 """,
                 (stream_ordering,),
             )