author     Mathieu Velten <mathieuv@matrix.org>   2023-08-16 16:19:54 +0200
committer  GitHub <noreply@github.com>            2023-08-16 16:19:54 +0200
commit     8c3bcea2da4939e21a99f72d6c3995186bc4b80d
tree       eee47996c192118ab4f0b3af9b50de3d095fb65b   /synapse/handlers/pagination.py
parent     Add link explaining ELK stack to structured_logging.md (#16091)
Rename pagination&purge locks and add comments explaining them (#16112)
Diffstat (limited to 'synapse/handlers/pagination.py')
-rw-r--r--   synapse/handlers/pagination.py   19
1 file changed, 12 insertions, 7 deletions
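
A short note on the locking pattern this commit renames and documents: purges take the purge/pagination lock in write mode, while pagination takes it in read mode, so paginations may run concurrently with each other but never overlap a purge. The following is a minimal sketch of that pattern, not the actual PaginationHandler code. It assumes a handler object holding Synapse's worker locks as self._worker_locks; only the lock name and the acquire_read_write_lock call come from the diff below, and the class and method names here are illustrative.

PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock"


class ExampleHandler:
    """Illustrative stand-in for PaginationHandler."""

    def __init__(self, worker_locks) -> None:
        # Assumed to be Synapse's worker locks handler, as used in the diff below.
        self._worker_locks = worker_locks

    async def purge(self, room_id: str) -> None:
        # A purge acquires the lock in write mode: only one purge per room at
        # a time, and no pagination may run while the lock is held.
        async with self._worker_locks.acquire_read_write_lock(
            PURGE_PAGINATION_LOCK_NAME, room_id, write=True
        ):
            ...  # delete history here

    async def paginate(self, room_id: str) -> None:
        # Pagination acquires the lock in read mode: paginations can overlap
        # each other, but not a purge, since backfill could re-create old
        # events that the purge is removing.
        async with self._worker_locks.acquire_read_write_lock(
            PURGE_PAGINATION_LOCK_NAME, room_id, write=False
        ):
            ...  # backfill/paginate here
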
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index da34658470..1be6ebc6d9 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -24,6 +24,7 @@ from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.room import ShutdownRoomResponse
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging.opentracing import trace
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.admin._base import assert_user_is_admin
@@ -46,9 +47,10 @@ logger = logging.getLogger(__name__)
 BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
 
 
-PURGE_HISTORY_LOCK_NAME = "purge_history_lock"
-
-DELETE_ROOM_LOCK_NAME = "delete_room_lock"
+# This is used to avoid purging a room several times at the same moment,
+# and also to avoid paginating during a purge. Pagination can trigger backfill,
+# which would create old events locally and could clash with the room delete.
+PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock"
 
 
 @attr.s(slots=True, auto_attribs=True)
@@ -363,7 +365,7 @@ class PaginationHandler:
         self._purges_in_progress_by_room.add(room_id)
         try:
             async with self._worker_locks.acquire_read_write_lock(
-                PURGE_HISTORY_LOCK_NAME, room_id, write=True
+                PURGE_PAGINATION_LOCK_NAME, room_id, write=True
             ):
                 await self._storage_controllers.purge_events.purge_history(
                     room_id, token, delete_local_events
@@ -421,7 +423,10 @@ class PaginationHandler:
             force: set true to skip checking for joined users.
         """
         async with self._worker_locks.acquire_multi_read_write_lock(
-            [(PURGE_HISTORY_LOCK_NAME, room_id), (DELETE_ROOM_LOCK_NAME, room_id)],
+            [
+                (PURGE_PAGINATION_LOCK_NAME, room_id),
+                (NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id),
+            ],
             write=True,
         ):
             # first check that we have no users in this room
@@ -483,7 +488,7 @@ class PaginationHandler:
         room_token = from_token.room_key
 
         async with self._worker_locks.acquire_read_write_lock(
-            PURGE_HISTORY_LOCK_NAME, room_id, write=False
+            PURGE_PAGINATION_LOCK_NAME, room_id, write=False
         ):
             (membership, member_event_id) = (None, None)
             if not use_admin_priviledge:
@@ -761,7 +766,7 @@ class PaginationHandler:
         self._purges_in_progress_by_room.add(room_id)
         try:
             async with self._worker_locks.acquire_read_write_lock(
-                PURGE_HISTORY_LOCK_NAME, room_id, write=True
+                PURGE_PAGINATION_LOCK_NAME, room_id, write=True
             ):
                 self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                 self._delete_by_id[