author    David Robertson <davidr@element.io>    2023-01-24 15:28:20 +0000
committer GitHub <noreply@github.com>            2023-01-24 15:28:20 +0000
commit    4607be0b7b2165710dc2e5e68ec4281b593ca8c5 (patch)
tree      9d5b3ceb98eb23364ee11f81d70b66077e5c5541 /synapse
parent    Faster joins: omit partial rooms from eager syncs until the resync completes ... (diff)
download  synapse-4607be0b7b2165710dc2e5e68ec4281b593ca8c5.tar.xz
Request partial joins by default (#14905)
* Request partial joins by default

This is a little sloppy, but we are trying to gain confidence in faster
joins in the upcoming RC.

Admins can still opt out by adding the following to their Synapse
config:

```yaml
experimental:
    faster_joins: false
```

We may revert this change before the release proper, depending on how
testing in the wild goes.

* Changelog

* Try to fix the backfill test failures

* Upgrade notes

* Postgres compat?
Diffstat (limited to 'synapse')
-rw-r--r--  synapse/config/experimental.py              2
-rw-r--r--  synapse/storage/databases/main/stream.py   40
2 files changed, 35 insertions, 7 deletions
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 89586db763..2590c88cde 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -84,7 +84,7 @@ class ExperimentalConfig(Config):
         # experimental support for faster joins over federation
         # (MSC2775, MSC3706, MSC3895)
         # requires a target server that can provide a partial join response (MSC3706)
-        self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
+        self.faster_joins_enabled: bool = experimental.get("faster_joins", True)
 
         # MSC3720 (Account status endpoint)
         self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
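As a side note on the flipped default above: with no `faster_joins` key present, `experimental.get("faster_joins", True)` now evaluates to `True`, and the opt-out stanza from the commit message restores the previous behaviour. A minimal sketch (plain Python, not Synapse's actual config loader; the helper name is made up):

```python
# Hypothetical helper mirroring the changed line in ExperimentalConfig.
# `experimental` stands in for the parsed experimental section of the config.
def faster_joins_enabled(experimental: dict) -> bool:
    # The default passed to .get() is what changed: False before, True now.
    return experimental.get("faster_joins", True)

print(faster_joins_enabled({}))                       # True  (new default)
print(faster_joins_enabled({"faster_joins": False}))  # False (explicit opt-out)
```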
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 63d8350530..d28fc65df9 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -67,7 +67,7 @@ from synapse.storage.database import (
     make_in_list_sql_clause,
 )
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
-from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 from synapse.types import PersistedEventPosition, RoomStreamToken
 from synapse.util.caches.descriptors import cached
@@ -944,12 +944,40 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             room_id
             stream_key
         """
-        sql = (
-            "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
-            " WHERE room_id = ? AND stream_ordering >= ?"
-        )
+        if isinstance(self.database_engine, PostgresEngine):
+            min_function = "LEAST"
+        elif isinstance(self.database_engine, Sqlite3Engine):
+            min_function = "MIN"
+        else:
+            raise RuntimeError(f"Unknown database engine {self.database_engine}")
+
+        # This query used to be
+        #    SELECT COALESCE(MIN(topological_ordering), 0) FROM events
+        #    WHERE room_id = ? and events.stream_ordering >= {stream_key}
+        # which returns 0 if the stream_key is newer than any event in
+        # the room. That's not wrong, but it seems to interact oddly with backfill,
+        # requiring a second call to /messages to actually backfill from a remote
+        # homeserver.
+        #
+        # Instead, rollback the stream ordering to that after the most recent event in
+        # this room.
+        sql = f"""
+            WITH fallback(max_stream_ordering) AS (
+                SELECT MAX(stream_ordering)
+                FROM events
+                WHERE room_id = ?
+            )
+            SELECT COALESCE(MIN(topological_ordering), 0) FROM events
+            WHERE
+                room_id = ?
+                AND events.stream_ordering >= {min_function}(
+                    ?,
+                    (SELECT max_stream_ordering FROM fallback)
+                )
+        """
+
         row = await self.db_pool.execute(
-            "get_current_topological_token", None, sql, room_id, stream_key
+            "get_current_topological_token", None, sql, room_id, room_id, stream_key
         )
         return row[0][0] if row else 0
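For context on the `LEAST`/`MIN` split above: Postgres spells the scalar two-argument minimum `LEAST(a, b)`, while SQLite reuses `MIN`, which behaves as a scalar function when given more than one argument. The standalone sketch below (plain `sqlite3`, not Synapse code; the table contents are made up) reproduces the behaviour the inline comment describes: the old query returns 0 when the stream key is newer than every event in the room, while the new query falls back to the depth of the room's most recent event.

```python
# Illustration only: an in-memory SQLite database, where the scalar
# two-argument MIN(a, b) plays the role of Postgres's LEAST(a, b).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE events (room_id TEXT, stream_ordering INT, topological_ordering INT)"
)
conn.executemany(
    "INSERT INTO events VALUES (?, ?, ?)",
    [("!room:example.org", 10, 3), ("!room:example.org", 11, 4)],
)

# Old shape of the query: a stream key newer than every event yields 0.
old = conn.execute(
    "SELECT COALESCE(MIN(topological_ordering), 0) FROM events"
    " WHERE room_id = ? AND stream_ordering >= ?",
    ("!room:example.org", 99),
).fetchone()[0]

# New shape: clamp the stream key to the room's newest stream ordering first.
new = conn.execute(
    """
    WITH fallback(max_stream_ordering) AS (
        SELECT MAX(stream_ordering) FROM events WHERE room_id = ?
    )
    SELECT COALESCE(MIN(topological_ordering), 0) FROM events
    WHERE room_id = ?
      AND stream_ordering >= MIN(?, (SELECT max_stream_ordering FROM fallback))
    """,
    ("!room:example.org", "!room:example.org", 99),
).fetchone()[0]

print(old, new)  # 0 4 -- the new query returns the latest event's depth instead of 0
```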