| | | |
|---|---|---|
| author | Patrick Cloke <patrickc@matrix.org> | 2020-09-18 11:17:58 -0400 |
| committer | Patrick Cloke <patrickc@matrix.org> | 2020-09-18 11:17:58 -0400 |
| commit | 00db7786de526edc9635f6a5d12f725c31d2f3f5 (patch) | |
| tree | a270513d2f7e18cad96ae328e703490b89b878fb | |
| parent | Admin API for querying rooms where a user is a member (#8306) (diff) | |
| parent | Add a note about including the changes from 1.19.3. (diff) | |
| download | synapse-00db7786de526edc9635f6a5d12f725c31d2f3f5.tar.xz | |
Merge tag 'v1.20.0rc5' into develop
Synapse 1.20.0rc5 (2020-09-18)
==============================

In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3.

Features
--------

- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343))

Bugfixes
--------

- Fix rate limiting of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342))
- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349))

Internal Changes
----------------

- Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. ([\#8285](https://github.com/matrix-org/synapse/issues/8285))
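For context on the `/versions` feature above: a client can read the new E2EE-default flags from `unstable_features`. The sketch below is illustrative only — the homeserver URL and the use of the `requests` library are assumptions, not part of this commit; the flag names match the `synapse/rest/client/versions.py` change in the diff further down.

```python
# Minimal sketch: query a homeserver's /versions endpoint and read the
# E2EE-default flags added in this release. The homeserver URL is an
# assumption for illustration; the flag names come from this commit.
import requests

resp = requests.get("https://matrix.example.com/_matrix/client/versions", timeout=10)
resp.raise_for_status()
features = resp.json().get("unstable_features", {})

for flag in (
    "io.element.e2ee_forced.public",
    "io.element.e2ee_forced.private",
    "io.element.e2ee_forced.trusted_private",
):
    # True means rooms created with that preset default to end-to-end encryption.
    print(flag, features.get(flag, False))
```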
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | CHANGES.md | 33 |
| -rw-r--r-- | changelog.d/8285.misc | 1 |
| -rw-r--r-- | changelog.d/8342.bugfix | 1 |
| -rw-r--r-- | debian/changelog | 6 |
| -rw-r--r-- | synapse/__init__.py | 2 |
| -rw-r--r-- | synapse/federation/federation_client.py | 8 |
| -rw-r--r-- | synapse/handlers/federation.py | 65 |
| -rw-r--r-- | synapse/handlers/pagination.py | 8 |
| -rw-r--r-- | synapse/rest/client/versions.py | 19 |
| -rw-r--r-- | synapse/storage/databases/main/stream.py | 13 |
10 files changed, 128 insertions, 28 deletions
```diff
diff --git a/CHANGES.md b/CHANGES.md
index b44248e264..84976ab2bd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,36 @@
+Synapse 1.20.0rc5 (2020-09-18)
+==============================
+
+In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3.
+
+Features
+--------
+
+- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343))
+
+
+Bugfixes
+--------
+
+- Fix rate limiting of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342))
+- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349))
+
+
+Internal Changes
+----------------
+
+- Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. ([\#8285](https://github.com/matrix-org/synapse/issues/8285))
+
+
+Synapse 1.19.3 (2020-09-18)
+===========================
+
+Bugfixes
+--------
+
+- Partially mitigate bug where newly joined servers couldn't get past events in a room when there is a malformed event. ([\#8350](https://github.com/matrix-org/synapse/issues/8350))
+
+
 Synapse 1.20.0rc4 (2020-09-16)
 ==============================
 
diff --git a/changelog.d/8285.misc b/changelog.d/8285.misc
deleted file mode 100644
index 4646664ba1..0000000000
--- a/changelog.d/8285.misc
+++ /dev/null
@@ -1 +0,0 @@
-Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented.
\ No newline at end of file
diff --git a/changelog.d/8342.bugfix b/changelog.d/8342.bugfix
deleted file mode 100644
index 786057facb..0000000000
--- a/changelog.d/8342.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix ratelimitng of federation `/send` requests.
diff --git a/debian/changelog b/debian/changelog
index bb7c175ada..dbf01d6b1e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -4,6 +4,12 @@ matrix-synapse-py3 (1.20.0ubuntu1) UNRELEASED; urgency=medium
 
  -- Dexter Chua <dec41@srcf.net>  Wed, 26 Aug 2020 12:41:36 +0000
 
+matrix-synapse-py3 (1.19.3) stable; urgency=medium
+
+  * New synapse release 1.19.3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 18 Sep 2020 14:59:30 +0100
+
 matrix-synapse-py3 (1.19.2) stable; urgency=medium
 
   * New synapse release 1.19.2.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 6b11c5681b..a95753dcc7 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -48,7 +48,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.20.0rc4"
+__version__ = "1.20.0rc5"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 639d19f696..688d43fffb 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -217,11 +217,9 @@ class FederationClient(FederationBase):
             for p in transaction_data["pdus"]
         ]
 
-        # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = await make_deferred_yieldable(
-            defer.gatherResults(
-                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True,
-            ).addErrback(unwrapFirstError)
+        # Check signatures and hash of pdus, removing any from the list that fail checks
+        pdus[:] = await self._check_sigs_and_hash_and_fetch(
+            dest, pdus, outlier=True, room_version=room_version
         )
 
         return pdus
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 96eeff7b1b..ea9264e751 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -917,15 +917,26 @@ class FederationHandler(BaseHandler):
 
         return events
 
-    async def maybe_backfill(self, room_id, current_depth):
+    async def maybe_backfill(
+        self, room_id: str, current_depth: int, limit: int
+    ) -> bool:
         """Checks the database to see if we should backfill before paginating,
         and if so do.
+
+        Args:
+            room_id
+            current_depth: The depth from which we're paginating from. This is
+                used to decide if we should backfill and what extremities to
+                use.
+            limit: The number of events that the pagination request will
+                return. This is used as part of the heuristic to decide if we
+                should back paginate.
         """
         extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
 
         if not extremities:
             logger.debug("Not backfilling as no extremeties found.")
-            return
+            return False
 
         # We only want to paginate if we can actually see the events we'll get,
         # as otherwise we'll just spend a lot of resources to get redacted
@@ -978,16 +989,54 @@ class FederationHandler(BaseHandler):
         sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
         max_depth = sorted_extremeties_tuple[0][1]
 
+        # If we're approaching an extremity we trigger a backfill, otherwise we
+        # no-op.
+        #
+        # We chose twice the limit here as then clients paginating backwards
+        # will send pagination requests that trigger backfill at least twice
+        # using the most recent extremity before it gets removed (see below). We
+        # chose more than one times the limit in case of failure, but choosing a
+        # much larger factor will result in triggering a backfill request much
+        # earlier than necessary.
+        if current_depth - 2 * limit > max_depth:
+            logger.debug(
+                "Not backfilling as we don't need to. %d < %d - 2 * %d",
+                max_depth,
+                current_depth,
+                limit,
+            )
+            return False
+
+        logger.debug(
+            "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
+            room_id,
+            current_depth,
+            max_depth,
+            sorted_extremeties_tuple,
+        )
+
+        # We ignore extremities that have a greater depth than our current depth
+        # as:
+        #    1. we don't really care about getting events that have happened
+        #       before our current position; and
+        #    2. we have likely previously tried and failed to backfill from that
+        #       extremity, so to avoid getting "stuck" requesting the same
+        #       backfill repeatedly we drop those extremities.
+        filtered_sorted_extremeties_tuple = [
+            t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
+        ]
+
+        # However, we need to check that the filtered extremities are non-empty.
+        # If they are empty then either we can a) bail or b) still attempt to
+        # backill. We opt to try backfilling anyway just in case we do get
+        # relevant events.
+        if filtered_sorted_extremeties_tuple:
+            sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
+
         # We don't want to specify too many extremities as it causes the backfill
         # request URI to be too long.
         extremities = dict(sorted_extremeties_tuple[:5])
 
-        if current_depth > max_depth:
-            logger.debug(
-                "Not backfilling as we don't need to. %d < %d", max_depth, current_depth
-            )
-            return
-
         # Now we need to decide which hosts to hit first.
 
         # First we try hosts that are already in the room
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index d929a68f7d..f132ed3368 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -358,9 +358,9 @@ class PaginationHandler:
             # if we're going backwards, we might need to backfill. This
             # requires that we have a topo token.
             if room_token.topological:
-                max_topo = room_token.topological
+                curr_topo = room_token.topological
             else:
-                max_topo = await self.store.get_max_topological_token(
+                curr_topo = await self.store.get_current_topological_token(
                     room_id, room_token.stream
                 )
 
@@ -379,13 +379,13 @@ class PaginationHandler:
                     leave_token = RoomStreamToken.parse(leave_token_str)
                     assert leave_token.topological is not None
 
-                    if leave_token.topological < max_topo:
+                    if leave_token.topological < curr_topo:
                         from_token = from_token.copy_and_replace(
                             "room_key", leave_token
                         )
 
                 await self.hs.get_handlers().federation_handler.maybe_backfill(
-                    room_id, max_topo
+                    room_id, curr_topo, limit=source_config.limit,
                 )
 
             to_room_key = None
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index d5018afbda..d24a199318 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -19,6 +19,7 @@
 import logging
 import re
 
+from synapse.api.constants import RoomCreationPreset
 from synapse.http.servlet import RestServlet
 
 logger = logging.getLogger(__name__)
@@ -31,6 +32,20 @@ class VersionsRestServlet(RestServlet):
         super().__init__()
         self.config = hs.config
 
+        # Calculate these once since they shouldn't change after start-up.
+        self.e2ee_forced_public = (
+            RoomCreationPreset.PUBLIC_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+        self.e2ee_forced_private = (
+            RoomCreationPreset.PRIVATE_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+        self.e2ee_forced_trusted_private = (
+            RoomCreationPreset.TRUSTED_PRIVATE_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+
     def on_GET(self, request):
         return (
             200,
@@ -62,6 +77,10 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.msc2432": True,
                     # Implements additional endpoints as described in MSC2666
                     "uk.half-shot.msc2666": True,
+                    # Whether new rooms will be set to encrypted or not (based on presets).
+                    "io.element.e2ee_forced.public": self.e2ee_forced_public,
+                    "io.element.e2ee_forced.private": self.e2ee_forced_private,
+                    "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
                 },
             },
         )
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 5dac78e574..92e96468b4 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -640,23 +640,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta):
         )
         return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"])
 
-    async def get_max_topological_token(self, room_id: str, stream_key: int) -> int:
-        """Get the max topological token in a room before the given stream
+    async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
+        """Gets the topological token in a room after or at the given stream
         ordering.
 
         Args:
             room_id
             stream_key
-
-        Returns:
-            The maximum topological token.
         """
         sql = (
-            "SELECT coalesce(max(topological_ordering), 0) FROM events"
-            " WHERE room_id = ? AND stream_ordering < ?"
+            "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
+            " WHERE room_id = ? AND stream_ordering >= ?"
         )
         row = await self.db_pool.execute(
-            "get_max_topological_token", None, sql, room_id, stream_key
+            "get_current_topological_token", None, sql, room_id, stream_key
         )
         return row[0][0] if row else 0
```
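The back-pagination fix above reshapes the backfill heuristic: `maybe_backfill` now receives the pagination `limit` and the current topological position (from the new `get_current_topological_token`), backfills only when the request is within `2 * limit` of the deepest known backward extremity, and drops extremities deeper than the current position so one unhandleable event can no longer wedge pagination. The following standalone sketch mirrors that decision; the function name, the plain-dict input, and the example values are illustrative and not Synapse's real interface.

```python
# Standalone sketch of the backfill decision introduced in this merge.
# `extremity_depths` maps backward-extremity event IDs to their depths,
# mirroring what get_oldest_events_with_depth_in_room returns. The function
# name and shape are illustrative, not Synapse's actual API.
from typing import Dict, List, Tuple


def should_backfill(
    current_depth: int, limit: int, extremity_depths: Dict[str, int]
) -> Tuple[bool, List[str]]:
    if not extremity_depths:
        return False, []

    # Sort extremities from deepest (most recent) to shallowest.
    sorted_extrems = sorted(extremity_depths.items(), key=lambda e: -int(e[1]))
    max_depth = sorted_extrems[0][1]

    # Only backfill when the client is within 2 * limit of the deepest
    # extremity; otherwise local history can satisfy the request.
    if current_depth - 2 * limit > max_depth:
        return False, []

    # Ignore extremities deeper than our current position: we don't need
    # events beyond it, and retrying them repeatedly is how pagination got
    # "stuck" before this fix.
    filtered = [t for t in sorted_extrems if int(t[1]) <= current_depth]
    if filtered:
        sorted_extrems = filtered

    # Cap the number of extremities so the backfill request URI stays short.
    return True, [event_id for event_id, _ in sorted_extrems[:5]]


# Example: paginating near the oldest known events triggers a backfill and
# skips the extremity that lies beyond the current position.
print(should_backfill(current_depth=120, limit=50, extremity_depths={"$a": 100, "$b": 200}))
```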