 changelog.d/14453.bugfix                      |   1 +
 synapse/storage/databases/main/receipts.py    | 185 +++++++++++++-----
 tests/storage/databases/main/test_receipts.py | 211 ++++++++++++++++++
 3 files changed, 373 insertions(+), 24 deletions(-)
diff --git a/changelog.d/14453.bugfix b/changelog.d/14453.bugfix
new file mode 100644
index 0000000000..4969e5450c
--- /dev/null
+++ b/changelog.d/14453.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier.
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index dc6989527e..fbf27497ec 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -113,24 +113,6 @@ class ReceiptsWorkerStore(SQLBaseStore):
             prefilled_cache=receipts_stream_prefill,
         )
 
-        self.db_pool.updates.register_background_index_update(
-            "receipts_linearized_unique_index",
-            index_name="receipts_linearized_unique_index",
-            table="receipts_linearized",
-            columns=["room_id", "receipt_type", "user_id"],
-            where_clause="thread_id IS NULL",
-            unique=True,
-        )
-
-        self.db_pool.updates.register_background_index_update(
-            "receipts_graph_unique_index",
-            index_name="receipts_graph_unique_index",
-            table="receipts_graph",
-            columns=["room_id", "receipt_type", "user_id"],
-            where_clause="thread_id IS NULL",
-            unique=True,
-        )
-
     def get_max_receipt_stream_id(self) -> int:
         """Get the current max stream ID for receipts stream"""
         return self._receipts_id_gen.get_current_token()
@@ -702,9 +684,6 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 "data": json_encoder.encode(data),
             },
             where_clause=where_clause,
-            # receipts_linearized has a unique constraint on
-            # (user_id, room_id, receipt_type), so no need to lock
-            lock=False,
         )
 
         return rx_ts
@@ -862,14 +841,13 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 "data": json_encoder.encode(data),
             },
             where_clause=where_clause,
-            # receipts_graph has a unique constraint on
-            # (user_id, room_id, receipt_type), so no need to lock
-            lock=False,
         )
 
 
 class ReceiptsBackgroundUpdateStore(SQLBaseStore):
     POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering"
+    RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME = "receipts_linearized_unique_index"
+    RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME = "receipts_graph_unique_index"
 
     def __init__(
         self,
@@ -883,6 +861,17 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
             self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING,
             self._populate_receipt_event_stream_ordering,
         )
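+        # These were previously registered via `register_background_index_update`,
+        # but they need custom handlers so that duplicate receipts can be removed
+        # before the unique indexes are created.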
+        self.db_pool.updates.register_background_update_handler(
+            self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME,
+            self._background_receipts_linearized_unique_index,
+        )
+        self.db_pool.updates.register_background_update_handler(
+            self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
+            self._background_receipts_graph_unique_index,
+        )
 
     async def _populate_receipt_event_stream_ordering(
         self, progress: JsonDict, batch_size: int
@@ -938,6 +924,154 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
 
         return batch_size
 
+    async def _create_receipts_index(self, index_name: str, table: str) -> None:
+        """Adds a unique index on `(room_id, receipt_type, user_id)` to the given
+        receipts table, for non-thread receipts."""
+
+        def _create_index(conn: LoggingDatabaseConnection) -> None:
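+            # Get out of any transaction the connection may already be in; the
+            # session's autocommit setting cannot be changed mid-transaction.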
+            conn.rollback()
+
+            # We have to set autocommit, because Postgres refuses to run
+            # CREATE INDEX CONCURRENTLY inside a transaction.
+            if isinstance(self.database_engine, PostgresEngine):
+                conn.set_session(autocommit=True)
+
+            try:
+                c = conn.cursor()
+
+                # The caller has removed any duplicates, so we can create the index.
+                concurrently = (
+                    "CONCURRENTLY"
+                    if isinstance(self.database_engine, PostgresEngine)
+                    else ""
+                )
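+                # This is a partial index: it only covers rows where thread_id
+                # IS NULL, so it enforces uniqueness for unthreaded receipts
+                # without constraining threaded ones.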
+                sql = f"""
+                    CREATE UNIQUE INDEX {concurrently} {index_name}
+                    ON {table}(room_id, receipt_type, user_id)
+                    WHERE thread_id IS NULL
+                """
+                c.execute(sql)
+            finally:
+                if isinstance(self.database_engine, PostgresEngine):
+                    conn.set_session(autocommit=False)
+
+        await self.db_pool.runWithConnection(_create_index)
+
+    async def _background_receipts_linearized_unique_index(
+        self, progress: JsonDict, batch_size: int
+    ) -> int:
+        """Removes duplicate receipts and adds a unique index on
+        `(room_id, receipt_type, user_id)` to `receipts_linearized`, for non-thread
+        receipts."""
+
+        def _remove_duplicate_receipts_txn(txn: LoggingTransaction) -> None:
+            # Identify any duplicate receipts arising from
+            # https://github.com/matrix-org/synapse/issues/14406.
+            # We expect the following query to use the per-thread receipt index and take
+            # less than a minute.
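+            # For each (room_id, receipt_type, user_id) with more than one
+            # unthreaded receipt, this returns the highest stream_id among them.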
+            sql = """
+                SELECT MAX(stream_id), room_id, receipt_type, user_id
+                FROM receipts_linearized
+                WHERE thread_id IS NULL
+                GROUP BY room_id, receipt_type, user_id
+                HAVING COUNT(*) > 1
+            """
+            txn.execute(sql)
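+            # Materialise the results now: the DELETEs below reuse `txn`, which
+            # would discard any rows still pending on the cursor.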
+            duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn))
+
+            # Then remove duplicate receipts, keeping the one with the highest
+            # `stream_id`. There should only be a single receipt with any given
+            # `stream_id`.
+            for max_stream_id, room_id, receipt_type, user_id in duplicate_keys:
+                sql = """
+                    DELETE FROM receipts_linearized
+                    WHERE
+                        room_id = ? AND
+                        receipt_type = ? AND
+                        user_id = ? AND
+                        thread_id IS NULL AND
+                        stream_id < ?
+                """
+                txn.execute(sql, (room_id, receipt_type, user_id, max_stream_id))
+
+        await self.db_pool.runInteraction(
+            self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME,
+            _remove_duplicate_receipts_txn,
+        )
+
+        await self._create_receipts_index(
+            "receipts_linearized_unique_index",
+            "receipts_linearized",
+        )
+
+        await self.db_pool.updates._end_background_update(
+            self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME
+        )
+
+        return 1
+
+    async def _background_receipts_graph_unique_index(
+        self, progress: JsonDict, batch_size: int
+    ) -> int:
+        """Removes duplicate receipts and adds a unique index on
+        `(room_id, receipt_type, user_id)` to `receipts_graph`, for non-thread
+        receipts."""
+
+        def _remove_duplicate_receipts_txn(txn: LoggingTransaction) -> None:
+            # Identify any duplicate receipts arising from
+            # https://github.com/matrix-org/synapse/issues/14406.
+            # We expect the following query to use the per-thread receipt index and take
+            # less than a minute.
+            sql = """
+                SELECT room_id, receipt_type, user_id FROM receipts_graph
+                WHERE thread_id IS NULL
+                GROUP BY room_id, receipt_type, user_id
+                HAVING COUNT(*) > 1
+            """
+            txn.execute(sql)
+            duplicate_keys = cast(List[Tuple[str, str, str]], list(txn))
+
+            # Then remove all duplicate receipts.
+            # We could be clever and try to keep the latest receipt out of every set of
+            # duplicates, but it's far simpler to remove them all.
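+            # (`receipts_graph` rows have no `stream_id`, unlike
+            # `receipts_linearized`, so there is no obvious "latest" to keep.)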
+            for room_id, receipt_type, user_id in duplicate_keys:
+                sql = """
+                    DELETE FROM receipts_graph
+                    WHERE
+                        room_id = ? AND
+                        receipt_type = ? AND
+                        user_id = ? AND
+                        thread_id IS NULL
+                """
+                txn.execute(sql, (room_id, receipt_type, user_id))
+
+        await self.db_pool.runInteraction(
+            self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
+            _remove_duplicate_receipts_txn,
+        )
+
+        await self._create_receipts_index(
+            "receipts_graph_unique_index",
+            "receipts_graph",
+        )
+
+        await self.db_pool.updates._end_background_update(
+            self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME
+        )
+
+        return 1
+
 
 class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore):
     pass
diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py
new file mode 100644
index 0000000000..c4f12d81d7
--- /dev/null
+++ b/tests/storage/databases/main/test_receipts.py
@@ -0,0 +1,211 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional, Sequence, Tuple
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.user_id = self.register_user("foo", "pass")
+        self.token = self.login("foo", "pass")
+        self.room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+        self.other_room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+    def _test_background_receipts_unique_index(
+        self,
+        update_name: str,
+        index_name: str,
+        table: str,
+        receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]],
+        expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]],
+    ) -> None:
+        """Test that the background update to uniqueify non-thread receipts in
+        the given receipts table works properly.
+
+        Args:
+            update_name: The name of the background update to test.
+            index_name: The name of the index that the background update creates.
+            table: The table of receipts that the background update fixes.
+            receipts: The test data containing duplicate receipts: a map from
+                `(room_id, receipt_type, user_id)` to the list of receipt rows
+                to insert for that key.
+            expected_unique_receipts: A dictionary of `(room_id, receipt_type, user_id)`
+                keys and expected receipt key-values after duplicate receipts have been
+                removed. `None` means all receipts for the key should have been removed.
+        """
+        # First, undo the background update.
+        def drop_receipts_unique_index(txn: LoggingTransaction) -> None:
+            txn.execute(f"DROP INDEX IF EXISTS {index_name}")
+
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "drop_receipts_unique_index",
+                drop_receipts_unique_index,
+            )
+        )
+
+        # Populate the receipts table, including duplicates.
+        for (room_id, receipt_type, user_id), rows in receipts.items():
+            for row in rows:
+                self.get_success(
+                    self.store.db_pool.simple_insert(
+                        table,
+                        {
+                            "room_id": room_id,
+                            "receipt_type": receipt_type,
+                            "user_id": user_id,
+                            "thread_id": None,
+                            "data": "{}",
+                            **row,
+                        },
+                    )
+                )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": update_name,
+                    "progress_json": "{}",
+                },
+            )
+        )
+
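+        # The updater may have already concluded that all updates have run, so
+        # reset its flag to make it pick up the row we just inserted.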
+        self.store.db_pool.updates._all_done = False
+
+        self.wait_for_background_updates()
+
+        # Check that the remaining receipts match expectations.
+        for (
+            room_id,
+            receipt_type,
+            user_id,
+        ), expected_row in expected_unique_receipts.items():
+            # Include the receipt key in the returned columns, for more informative
+            # assertion messages.
+            columns = ["room_id", "receipt_type", "user_id"]
+            if expected_row is not None:
+                columns += expected_row.keys()
+
+            rows = self.get_success(
+                self.store.db_pool.simple_select_list(
+                    table=table,
+                    keyvalues={
+                        "room_id": room_id,
+                        "receipt_type": receipt_type,
+                        "user_id": user_id,
+                        # `simple_select_list` does not support NULL filters,
+                        # so skip the filter on `thread_id`.
+                    },
+                    retcols=columns,
+                    desc="get_receipt",
+                )
+            )
+
+            if expected_row is not None:
+                self.assertEqual(
+                    len(rows),
+                    1,
+                    f"Background update did not leave behind latest receipt in {table}",
+                )
+                self.assertEqual(
+                    rows[0],
+                    {
+                        "room_id": room_id,
+                        "receipt_type": receipt_type,
+                        "user_id": user_id,
+                        **expected_row,
+                    },
+                )
+            else:
+                self.assertEqual(
+                    len(rows),
+                    0,
+                    f"Background update did not remove all duplicate receipts from {table}",
+                )
+
+    def test_background_receipts_linearized_unique_index(self) -> None:
+        """Test that the background update to uniqueify non-thread receipts in
+        `receipts_linearized` works properly.
+        """
+        self._test_background_receipts_unique_index(
+            "receipts_linearized_unique_index",
+            "receipts_linearized_unique_index",
+            "receipts_linearized",
+            receipts={
+                (self.room_id, "m.read", self.user_id): [
+                    {"stream_id": 5, "event_id": "$some_event"},
+                    {"stream_id": 6, "event_id": "$some_event"},
+                ],
+                (self.other_room_id, "m.read", self.user_id): [
+                    {"stream_id": 7, "event_id": "$some_event"}
+                ],
+            },
+            expected_unique_receipts={
+                (self.room_id, "m.read", self.user_id): {"stream_id": 6},
+                (self.other_room_id, "m.read", self.user_id): {"stream_id": 7},
+            },
+        )
+
+    def test_background_receipts_graph_unique_index(self) -> None:
+        """Test that the background update to uniqueify non-thread receipts in
+        `receipts_graph` works properly.
+        """
+        self._test_background_receipts_unique_index(
+            "receipts_graph_unique_index",
+            "receipts_graph_unique_index",
+            "receipts_graph",
+            receipts={
+                (self.room_id, "m.read", self.user_id): [
+                    {
+                        "event_ids": '["$some_event"]',
+                    },
+                    {
+                        "event_ids": '["$some_event"]',
+                    },
+                ],
+                (self.other_room_id, "m.read", self.user_id): [
+                    {
+                        "event_ids": '["$some_event"]',
+                    }
+                ],
+            },
+            expected_unique_receipts={
+                (self.room_id, "m.read", self.user_id): None,
+                (self.other_room_id, "m.read", self.user_id): {
+                    "event_ids": '["$some_event"]'
+                },
+            },
+        )