diff --git a/synapse/__init__.py b/synapse/__init__.py
index 6b11c5681b..a95753dcc7 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -48,7 +48,7 @@ try:
except ImportError:
pass
-__version__ = "1.20.0rc4"
+__version__ = "1.20.0rc5"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 639d19f696..688d43fffb 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -217,11 +217,9 @@ class FederationClient(FederationBase):
for p in transaction_data["pdus"]
]
- # FIXME: We should handle signature failures more gracefully.
- pdus[:] = await make_deferred_yieldable(
- defer.gatherResults(
- self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True,
- ).addErrback(unwrapFirstError)
+ # Check signatures and hash of pdus, removing any from the list that fail checks
+ pdus[:] = await self._check_sigs_and_hash_and_fetch(
+ dest, pdus, outlier=True, room_version=room_version
)
return pdus
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 96eeff7b1b..ea9264e751 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -917,15 +917,26 @@ class FederationHandler(BaseHandler):
return events
- async def maybe_backfill(self, room_id, current_depth):
+ async def maybe_backfill(
+ self, room_id: str, current_depth: int, limit: int
+ ) -> bool:
"""Checks the database to see if we should backfill before paginating,
and if so do.
+
+ Args:
+ room_id
+            current_depth: The depth from which we're paginating. This is
+ used to decide if we should backfill and what extremities to
+ use.
+ limit: The number of events that the pagination request will
+ return. This is used as part of the heuristic to decide if we
+ should back paginate.
"""
extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
if not extremities:
logger.debug("Not backfilling as no extremeties found.")
- return
+ return False
# We only want to paginate if we can actually see the events we'll get,
# as otherwise we'll just spend a lot of resources to get redacted
@@ -978,16 +989,54 @@ class FederationHandler(BaseHandler):
sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
max_depth = sorted_extremeties_tuple[0][1]
+ # If we're approaching an extremity we trigger a backfill, otherwise we
+ # no-op.
+ #
+        # We choose twice the limit here as then clients paginating backwards
+        # will send pagination requests that trigger backfill at least twice
+        # using the most recent extremity before it gets removed (see below). We
+        # choose a factor larger than one to allow for failures, but choosing a
+        # much larger factor will result in triggering a backfill request much
+        # earlier than necessary.
+ if current_depth - 2 * limit > max_depth:
+ logger.debug(
+ "Not backfilling as we don't need to. %d < %d - 2 * %d",
+ max_depth,
+ current_depth,
+ limit,
+ )
+ return False
+
+ logger.debug(
+ "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
+ room_id,
+ current_depth,
+ max_depth,
+ sorted_extremeties_tuple,
+ )
+
+ # We ignore extremities that have a greater depth than our current depth
+ # as:
+ # 1. we don't really care about getting events that have happened
+ # before our current position; and
+ # 2. we have likely previously tried and failed to backfill from that
+ # extremity, so to avoid getting "stuck" requesting the same
+ # backfill repeatedly we drop those extremities.
+ filtered_sorted_extremeties_tuple = [
+ t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
+ ]
+
+        # However, we need to check that the filtered extremities are non-empty.
+        # If they are empty then we can either a) bail or b) still attempt to
+        # backfill. We opt to try backfilling anyway just in case we do get
+        # relevant events.
+ if filtered_sorted_extremeties_tuple:
+ sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
+
# We don't want to specify too many extremities as it causes the backfill
# request URI to be too long.
extremities = dict(sorted_extremeties_tuple[:5])
- if current_depth > max_depth:
- logger.debug(
- "Not backfilling as we don't need to. %d < %d", max_depth, current_depth
- )
- return
-
# Now we need to decide which hosts to hit first.
# First we try hosts that are already in the room
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index d929a68f7d..f132ed3368 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -358,9 +358,9 @@ class PaginationHandler:
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
- max_topo = room_token.topological
+ curr_topo = room_token.topological
else:
- max_topo = await self.store.get_max_topological_token(
+ curr_topo = await self.store.get_current_topological_token(
room_id, room_token.stream
)
@@ -379,13 +379,13 @@ class PaginationHandler:
leave_token = RoomStreamToken.parse(leave_token_str)
assert leave_token.topological is not None
- if leave_token.topological < max_topo:
+ if leave_token.topological < curr_topo:
from_token = from_token.copy_and_replace(
"room_key", leave_token
)
await self.hs.get_handlers().federation_handler.maybe_backfill(
- room_id, max_topo
+ room_id, curr_topo, limit=source_config.limit,
)
to_room_key = None
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index d5018afbda..d24a199318 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -19,6 +19,7 @@
import logging
import re
+from synapse.api.constants import RoomCreationPreset
from synapse.http.servlet import RestServlet
logger = logging.getLogger(__name__)
@@ -31,6 +32,20 @@ class VersionsRestServlet(RestServlet):
super().__init__()
self.config = hs.config
+ # Calculate these once since they shouldn't change after start-up.
+ self.e2ee_forced_public = (
+ RoomCreationPreset.PUBLIC_CHAT
+ in self.config.encryption_enabled_by_default_for_room_presets
+ )
+ self.e2ee_forced_private = (
+ RoomCreationPreset.PRIVATE_CHAT
+ in self.config.encryption_enabled_by_default_for_room_presets
+ )
+ self.e2ee_forced_trusted_private = (
+ RoomCreationPreset.TRUSTED_PRIVATE_CHAT
+ in self.config.encryption_enabled_by_default_for_room_presets
+ )
+
def on_GET(self, request):
return (
200,
@@ -62,6 +77,10 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc2432": True,
# Implements additional endpoints as described in MSC2666
"uk.half-shot.msc2666": True,
+ # Whether new rooms will be set to encrypted or not (based on presets).
+ "io.element.e2ee_forced.public": self.e2ee_forced_public,
+ "io.element.e2ee_forced.private": self.e2ee_forced_private,
+ "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
},
},
)
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 5dac78e574..92e96468b4 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -640,23 +640,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta):
)
return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"])
- async def get_max_topological_token(self, room_id: str, stream_key: int) -> int:
- """Get the max topological token in a room before the given stream
+ async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
+ """Gets the topological token in a room after or at the given stream
ordering.
Args:
room_id
stream_key
-
- Returns:
- The maximum topological token.
"""
sql = (
- "SELECT coalesce(max(topological_ordering), 0) FROM events"
- " WHERE room_id = ? AND stream_ordering < ?"
+ "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
+ " WHERE room_id = ? AND stream_ordering >= ?"
)
row = await self.db_pool.execute(
- "get_max_topological_token", None, sql, room_id, stream_key
+ "get_current_topological_token", None, sql, room_id, stream_key
)
return row[0][0] if row else 0
|