author     Patrick Cloke <clokep@users.noreply.github.com>  2023-11-07 14:00:25 -0500
committer  GitHub <noreply@github.com>  2023-11-07 14:00:25 -0500
commit     9738b1c4975b293a1bc25ee27b5527724038baa1 (patch)
tree       43c74a577da2f5eeda0f6a2806910de44163a57a
parent     More tests for the simple_* methods. (#16596)
download   synapse-9738b1c4975b293a1bc25ee27b5527724038baa1.tar.xz
Avoid executing no-op queries. (#16583)
If simple_{insert,upsert,update}_many_txn is called without any data
to modify, then return early instead of executing the query.

This matches the behavior of simple_{select,delete}_many_txn.
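
As a rough illustration of the new short-circuit (a standalone sketch, not Synapse's actual DatabasePool code; the insert_many helper and the table/column names are invented for the example):

from typing import Any, Collection, Iterable

def insert_many(txn, table: str, keys: Collection[str], values: Collection[Iterable[Any]]) -> None:
    # If there's nothing to insert, skip building and executing the query.
    if not values:
        return
    columns = ", ".join(keys)
    placeholders = ", ".join("?" for _ in keys)
    txn.executemany(f"INSERT INTO {table} ({columns}) VALUES ({placeholders})", values)

# insert_many(txn, "event_json", ("event_id", "room_id"), [])  -> returns without touching the database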
 changelog.d/16583.misc                      |  1
 synapse/storage/database.py                 | 32
 synapse/storage/databases/main/devices.py   |  2
 synapse/storage/databases/main/events.py    | 12
 synapse/storage/databases/main/room.py      |  2
 synapse/storage/databases/main/search.py    |  4
 tests/storage/test_base.py                  | 25
 7 files changed, 39 insertions, 39 deletions
diff --git a/changelog.d/16583.misc b/changelog.d/16583.misc
new file mode 100644
index 0000000000..df5b27b112
--- /dev/null
+++ b/changelog.d/16583.misc
@@ -0,0 +1 @@
+Avoid executing no-op queries.
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index abc7d8a5d2..792f2e7cdf 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -1117,7 +1117,7 @@ class DatabasePool:
         txn: LoggingTransaction,
         table: str,
         keys: Collection[str],
-        values: Iterable[Iterable[Any]],
+        values: Collection[Iterable[Any]],
     ) -> None:
         """Executes an INSERT query on the named table.
 
@@ -1130,6 +1130,9 @@ class DatabasePool:
             keys: list of column names
             values: for each row, a list of values in the same order as `keys`
         """
+        # If there's nothing to insert, then skip executing the query.
+        if not values:
+            return
 
         if isinstance(txn.database_engine, PostgresEngine):
             # We use `execute_values` as it can be a lot faster than `execute_batch`,
@@ -1455,7 +1458,7 @@ class DatabasePool:
         key_names: Collection[str],
         key_values: Collection[Iterable[Any]],
         value_names: Collection[str],
-        value_values: Iterable[Iterable[Any]],
+        value_values: Collection[Iterable[Any]],
     ) -> None:
         """
         Upsert, many times.
@@ -1468,6 +1471,19 @@ class DatabasePool:
             value_values: A list of each row's value column values.
                 Ignored if value_names is empty.
         """
+        # If there's nothing to upsert, then skip executing the query.
+        if not key_values:
+            return
+
+        # No value columns, therefore make a blank list so that the following
+        # zip() works correctly.
+        if not value_names:
+            value_values = [() for x in range(len(key_values))]
+        elif len(value_values) != len(key_values):
+            raise ValueError(
+                f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number."
+            )
+
         if table not in self._unsafe_to_upsert_tables:
             return self.simple_upsert_many_txn_native_upsert(
                 txn, table, key_names, key_values, value_names, value_values
@@ -1502,10 +1518,6 @@ class DatabasePool:
             value_values: A list of each row's value column values.
                 Ignored if value_names is empty.
         """
-        # No value columns, therefore make a blank list so that the following
-        # zip() works correctly.
-        if not value_names:
-            value_values = [() for x in range(len(key_values))]
 
         # Lock the table just once, to prevent it being done once per row.
         # Note that, according to Postgres' documentation, once obtained,
@@ -1543,10 +1555,7 @@ class DatabasePool:
         allnames.extend(value_names)
 
         if not value_names:
-            # No value columns, therefore make a blank list so that the
-            # following zip() works correctly.
             latter = "NOTHING"
-            value_values = [() for x in range(len(key_values))]
         else:
             latter = "UPDATE SET " + ", ".join(
                 k + "=EXCLUDED." + k for k in value_names
@@ -1910,6 +1919,7 @@ class DatabasePool:
         Returns:
             The results as a list of tuples.
         """
+        # If there's nothing to select, then skip executing the query.
         if not iterable:
             return []
 
@@ -2044,6 +2054,9 @@ class DatabasePool:
             raise ValueError(
                 f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number."
             )
+        # If there is nothing to update, then skip executing the query.
+        if not key_values:
+            return
 
         # List of tuples of (value values, then key values)
         # (This matches the order needed for the query)
@@ -2278,6 +2291,7 @@ class DatabasePool:
         Returns:
             Number rows deleted
         """
+        # If there's nothing to delete, then skip executing the query.
         if not values:
             return 0
 
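For reference, the value-row handling that simple_upsert_many_txn now does up front looks roughly like the following (a simplified sketch mirroring the shape of the diff above, with an invented upsert_many name and the SQL generation omitted):

from typing import Any, Collection, Iterable

def upsert_many(
    key_names: Collection[str],
    key_values: Collection[Iterable[Any]],
    value_names: Collection[str],
    value_values: Collection[Iterable[Any]],
) -> None:
    # Nothing to upsert: skip executing the query.
    if not key_values:
        return
    if not value_names:
        # No value columns, so supply one empty tuple per key row so that the
        # later zip() still pairs every key row with something.
        value_values = [() for _ in range(len(key_values))]
    elif len(value_values) != len(key_values):
        raise ValueError(
            f"{len(key_values)} key rows and {len(value_values)} value rows: "
            "should be the same number."
        )
    for key_row, value_row in zip(key_values, value_values):
        print(key_row, value_row)  # stand-in for building the real upsert arguments
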
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index b0811a4cf1..04d12a876c 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -703,7 +703,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             key_names=("destination", "user_id"),
             key_values=[(destination, user_id) for user_id, _ in rows],
             value_names=("stream_id",),
-            value_values=((stream_id,) for _, stream_id in rows),
+            value_values=[(stream_id,) for _, stream_id in rows],
         )
 
         # Delete all sent outbound pokes
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 647ba182f6..7c34bde3e5 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1476,7 +1476,7 @@ class PersistEventsStore:
             txn,
             table="event_json",
             keys=("event_id", "room_id", "internal_metadata", "json", "format_version"),
-            values=(
+            values=[
                 (
                     event.event_id,
                     event.room_id,
@@ -1485,7 +1485,7 @@ class PersistEventsStore:
                     event.format_version,
                 )
                 for event, _ in events_and_contexts
-            ),
+            ],
         )
 
         self.db_pool.simple_insert_many_txn(
@@ -1508,7 +1508,7 @@ class PersistEventsStore:
                 "state_key",
                 "rejection_reason",
             ),
-            values=(
+            values=[
                 (
                     self._instance_name,
                     event.internal_metadata.stream_ordering,
@@ -1527,7 +1527,7 @@ class PersistEventsStore:
                     context.rejected,
                 )
                 for event, context in events_and_contexts
-            ),
+            ],
         )
 
         # If we're persisting an unredacted event we go and ensure
@@ -1550,11 +1550,11 @@ class PersistEventsStore:
             txn,
             table="state_events",
             keys=("event_id", "room_id", "type", "state_key"),
-            values=(
+            values=[
                 (event.event_id, event.room_id, event.type, event.state_key)
                 for event, _ in events_and_contexts
                 if event.is_state()
-            ),
+            ],
         )
 
     def _store_rejected_events_txn(
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 6d4b9891e7..afb880532e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -2268,7 +2268,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
             txn,
             table="partial_state_rooms_servers",
             keys=("room_id", "server_name"),
-            values=((room_id, s) for s in servers),
+            values=[(room_id, s) for s in servers],
         )
         self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
         self._invalidate_cache_and_stream(
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index dbde9130c6..f4bef4c99b 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -106,7 +106,7 @@ class SearchWorkerStore(SQLBaseStore):
                 txn,
                 table="event_search",
                 keys=("event_id", "room_id", "key", "value"),
-                values=(
+                values=[
                     (
                         entry.event_id,
                         entry.room_id,
@@ -114,7 +114,7 @@ class SearchWorkerStore(SQLBaseStore):
                         _clean_value_for_search(entry.value),
                     )
                     for entry in entries
-                ),
+                ],
             )
 
         else:
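
The caller-side changes above (devices.py, events.py, room.py, search.py) all swap generator expressions for list comprehensions because the tightened Collection annotation needs emptiness checks and len() to work; a quick standalone demonstration of the difference:

rows = []  # e.g. no device updates to record

as_generator = (("stream_id", r) for r in rows)  # Iterable, but not a Collection
print(bool(as_generator))                        # True -- a generator is always truthy, even when empty
# len(as_generator) would raise TypeError: object of type 'generator' has no len()

as_list = [("stream_id", r) for r in rows]       # a Collection: bool() and len() behave as expected
print(bool(as_list), len(as_list))               # False 0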
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index b4c490b568..de4fcfe026 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -189,17 +189,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         )
 
         if USE_POSTGRES_FOR_TESTS:
-            self.mock_execute_values.assert_called_once_with(
-                self.mock_txn,
-                "INSERT INTO tablename (col1, col2) VALUES ?",
-                [],
-                template=None,
-                fetch=False,
-            )
+            self.mock_execute_values.assert_not_called()
         else:
-            self.mock_txn.executemany.assert_called_once_with(
-                "INSERT INTO tablename (col1, col2) VALUES(?, ?)", []
-            )
+            self.mock_txn.executemany.assert_not_called()
 
     @defer.inlineCallbacks
     def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]:
@@ -393,7 +385,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
 
     @defer.inlineCallbacks
-    def test_update_many_no_values(
+    def test_update_many_no_iterable(
         self,
     ) -> Generator["defer.Deferred[object]", object, None]:
         yield defer.ensureDeferred(
@@ -408,16 +400,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         )
 
         if USE_POSTGRES_FOR_TESTS:
-            self.mock_execute_batch.assert_called_once_with(
-                self.mock_txn,
-                "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?",
-                [],
-            )
+            self.mock_execute_batch.assert_not_called()
         else:
-            self.mock_txn.executemany.assert_called_once_with(
-                "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?",
-                [],
-            )
+            self.mock_txn.executemany.assert_not_called()
 
     @defer.inlineCallbacks
     def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]:
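
The updated tests boil down to checking that the driver is never reached for empty input; a self-contained sketch of that assertion style using unittest.mock (not Synapse's actual test harness):

from unittest import mock

def insert_many(txn, table, keys, values):
    # Same early return as in the patch: no rows means no query.
    if not values:
        return
    txn.executemany(f"INSERT INTO {table} ({', '.join(keys)}) VALUES ...", values)

txn = mock.Mock()
insert_many(txn, "tablename", ("col1", "col2"), [])
txn.executemany.assert_not_called()  # passes: the no-op path skipped the query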