author    Richard van der Hoff <richard@matrix.org>  2017-05-10 17:46:41 +0100
committer Richard van der Hoff <richard@matrix.org>  2017-05-10 18:44:22 +0100
commit    b64d312421976162a8d41246f11652b5003bb66f (patch)
tree      1ce612eb58a32ba5963c842faa959646df25fa48 /synapse
parent    Merge pull request #2208 from matrix-org/erikj/ratelimit_overrid (diff)
download  synapse-b64d312421976162a8d41246f11652b5003bb66f.tar.xz
add some logging to purge_history
Diffstat (limited to 'synapse')
-rw-r--r--  synapse/storage/events.py | 25
1 file changed, 21 insertions, 4 deletions
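
The change brackets each phase of the purge transaction with DEBUG-level log lines, all prefixed with "[purge]", so the progress of a long-running history purge can be followed in the server log. As a rough illustration of the pattern (a minimal sketch with a hypothetical purge function, not the Synapse code itself):

    import logging

    logger = logging.getLogger(__name__)

    def _purge_history_sketch(txn, room_id):
        # Illustrative only: each step of the transaction announces itself
        # before running, mirroring the "[purge]" markers added in this commit.
        logger.debug("[purge] Finding new backward extremities")
        # ... SELECT the events that will become the new backward extremities ...

        logger.debug("[purge] finding redundant state groups")
        # ... work out which state groups are only referenced by purged events ...

        logger.debug("[purge] removing redundant state groups")
        # ... DELETE them ...

        logger.debug("[purge] done")
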
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 2ab44ceaa7..512828cf34 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -2033,6 +2033,8 @@ class EventsStore(SQLBaseStore):
         for event_id, state_key in event_rows:
             txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
 
+        logger.debug("[purge] Finding new backward extremities")
+
         # We calculate the new entries for the backward extremeties by finding
         # all events that point to events that are to be purged
         txn.execute(
@@ -2045,6 +2047,8 @@ class EventsStore(SQLBaseStore):
         )
         new_backwards_extrems = txn.fetchall()
 
+        logger.debug("[purge] replacing backward extremities: %r", new_backwards_extrems)
+
         txn.execute(
             "DELETE FROM event_backward_extremities WHERE room_id = ?",
             (room_id,)
@@ -2059,6 +2063,8 @@ class EventsStore(SQLBaseStore):
             ]
         )
 
+        logger.debug("[purge] finding redundant state groups")
+
         # Get all state groups that are only referenced by events that are
         # to be deleted.
         txn.execute(
@@ -2076,6 +2082,10 @@ class EventsStore(SQLBaseStore):
         state_rows = txn.fetchall()
         state_groups_to_delete = [sg for sg, in state_rows]
 
+        logger.debug(
+            "[purge] finding state groups which depend on redundant state groups"
+        )
+
         # Now we get all the state groups that rely on these state groups
         new_state_edges = []
         chunks = [
@@ -2096,6 +2106,8 @@ class EventsStore(SQLBaseStore):
         # Now we turn the state groups that reference to-be-deleted state groups
         # to non delta versions.
         for new_state_edge in new_state_edges:
+            logger.debug("[purge] de-delta-ing remaining state group %s",
+                         new_state_edge)
             curr_state = self._get_state_groups_from_groups_txn(
                 txn, [new_state_edge], types=None
             )
@@ -2132,6 +2144,7 @@ class EventsStore(SQLBaseStore):
                 ],
             )
 
+        logger.debug("[purge] removing redundant state groups")
         txn.executemany(
             "DELETE FROM state_groups_state WHERE state_group = ?",
             state_rows
@@ -2140,12 +2153,15 @@ class EventsStore(SQLBaseStore):
             "DELETE FROM state_groups WHERE id = ?",
             state_rows
         )
+
         # Delete all non-state
+        logger.debug("[purge] removing events from event_to_state_groups")
         txn.executemany(
             "DELETE FROM event_to_state_groups WHERE event_id = ?",
             [(event_id,) for event_id, _ in event_rows]
         )
 
+        logger.debug("[purge] updating room_depth")
         txn.execute(
             "UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
             (topological_ordering, room_id,)
@@ -2171,16 +2187,15 @@ class EventsStore(SQLBaseStore):
             "event_signatures",
             "rejections",
         ):
+            logger.debug("[purge] removing non-state events from %s", table)
+
             txn.executemany(
                 "DELETE FROM %s WHERE event_id = ?" % (table,),
                 to_delete
             )
 
-        txn.executemany(
-            "DELETE FROM events WHERE event_id = ?",
-            to_delete
-        )
         # Mark all state and own events as outliers
+        logger.debug("[purge] marking events as outliers")
         txn.executemany(
             "UPDATE events SET outlier = ?"
             " WHERE event_id = ?",
@@ -2190,6 +2205,8 @@ class EventsStore(SQLBaseStore):
             ]
         )
 
+        logger.debug("[purge] done")
+
     @defer.inlineCallbacks
     def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream