From e8519e0ed289b67fa07c1bdbb6898852dc1a50b9 Mon Sep 17 00:00:00 2001 From: Jan Schär Date: Mon, 25 Jul 2022 17:27:19 +0200 Subject: Support Implicit TLS for sending emails (#13317) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, TLS could only be used with STARTTLS. Add a new option `force_tls`, where TLS is used from the start. Implicit TLS is recommended over STARTLS, see https://datatracker.ietf.org/doc/html/rfc8314 Fixes #8046. Signed-off-by: Jan Schär --- tests/handlers/test_send_email.py | 57 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py index 6f77b1237c..da4bf8b582 100644 --- a/tests/handlers/test_send_email.py +++ b/tests/handlers/test_send_email.py @@ -23,7 +23,7 @@ from twisted.internet.defer import ensureDeferred from twisted.mail import interfaces, smtp from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config @implementer(interfaces.IMessageDelivery) @@ -110,3 +110,58 @@ class SendEmailHandlerTestCase(HomeserverTestCase): user, msg = message_delivery.messages.pop() self.assertEqual(str(user), "foo@bar.com") self.assertIn(b"Subject: test subject", msg) + + @override_config( + { + "email": { + "notif_from": "noreply@test", + "force_tls": True, + }, + } + ) + def test_send_email_force_tls(self): + """Happy-path test that we can send email to an Implicit TLS server.""" + h = self.hs.get_send_email_handler() + d = ensureDeferred( + h.send_email( + "foo@bar.com", "test subject", "Tests", "HTML content", "Text content" + ) + ) + # there should be an attempt to connect to localhost:465 + self.assertEqual(len(self.reactor.sslClients), 1) + ( + host, + port, + client_factory, + contextFactory, + _timeout, + _bindAddress, + ) = self.reactor.sslClients[0] + self.assertEqual(host, "localhost") + self.assertEqual(port, 465) + + # wire it up to an SMTP server + message_delivery = _DummyMessageDelivery() + server_protocol = smtp.ESMTP() + server_protocol.delivery = message_delivery + # make sure that the server uses the test reactor to set timeouts + server_protocol.callLater = self.reactor.callLater # type: ignore[assignment] + + client_protocol = client_factory.buildProtocol(None) + client_protocol.makeConnection(FakeTransport(server_protocol, self.reactor)) + server_protocol.makeConnection( + FakeTransport( + client_protocol, + self.reactor, + peer_address=IPv4Address("TCP", "127.0.0.1", 1234), + ) + ) + + # the message should now get delivered + self.get_success(d, by=0.1) + + # check it arrived + self.assertEqual(len(message_delivery.messages), 1) + user, msg = message_delivery.messages.pop() + self.assertEqual(str(user), "foo@bar.com") + self.assertIn(b"Subject: test subject", msg) -- cgit 1.5.1 From 335ebb21ccc0ae906169f21dcfc456c869bdd301 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 26 Jul 2022 12:39:23 +0100 Subject: Faster room joins: avoid blocking when pulling events with missing prevs (#13355) Avoid blocking on full state in `_resolve_state_at_missing_prevs` and return a new flag indicating whether the resolved state is partial. Thread that flag around so that it makes it into the event context. 
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/13355.misc | 1 + synapse/handlers/federation_event.py | 116 +++++++++++++++++++++++++++-------- synapse/handlers/message.py | 4 ++ synapse/state/__init__.py | 18 ++++-- synapse/storage/controllers/state.py | 8 ++- tests/handlers/test_federation.py | 1 + tests/storage/test_events.py | 7 ++- tests/test_state.py | 2 + 8 files changed, 124 insertions(+), 33 deletions(-) create mode 100644 changelog.d/13355.misc (limited to 'tests') diff --git a/changelog.d/13355.misc b/changelog.d/13355.misc new file mode 100644 index 0000000000..7715075885 --- /dev/null +++ b/changelog.d/13355.misc @@ -0,0 +1 @@ +Faster room joins: avoid blocking when pulling events with partially missing prev events. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 16f20c8be7..fc1254d2ad 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -278,7 +278,9 @@ class FederationEventHandler: ) try: - await self._process_received_pdu(origin, pdu, state_ids=None) + await self._process_received_pdu( + origin, pdu, state_ids=None, partial_state=None + ) except PartialStateConflictError: # The room was un-partial stated while we were processing the PDU. # Try once more, with full state this time. @@ -286,7 +288,9 @@ class FederationEventHandler: "Room %s was un-partial stated while processing the PDU, trying again.", room_id, ) - await self._process_received_pdu(origin, pdu, state_ids=None) + await self._process_received_pdu( + origin, pdu, state_ids=None, partial_state=None + ) async def on_send_membership_event( self, origin: str, event: EventBase @@ -534,14 +538,36 @@ class FederationEventHandler: # # This is the same operation as we do when we receive a regular event # over federation. - state_ids = await self._resolve_state_at_missing_prevs(destination, event) - - # build a new state group for it if need be - context = await self._state_handler.compute_event_context( - event, - state_ids_before_event=state_ids, + state_ids, partial_state = await self._resolve_state_at_missing_prevs( + destination, event ) - if context.partial_state: + + # There are three possible cases for (state_ids, partial_state): + # * `state_ids` and `partial_state` are both `None` if we had all the + # prev_events. The prev_events may or may not have partial state and + # we won't know until we compute the event context. + # * `state_ids` is not `None` and `partial_state` is `False` if we were + # missing some prev_events (but we have full state for any we did + # have). We calculated the full state after the prev_events. + # * `state_ids` is not `None` and `partial_state` is `True` if we were + # missing some, but not all, prev_events. At least one of the + # prev_events we did have had partial state, so we calculated a partial + # state after the prev_events. + + context = None + if state_ids is not None and partial_state: + # the state after the prev events is still partial. We can't de-partial + # state the event, so don't bother building the event context. 
+ pass + else: + # build a new state group for it if need be + context = await self._state_handler.compute_event_context( + event, + state_ids_before_event=state_ids, + partial_state=partial_state, + ) + + if context is None or context.partial_state: # this can happen if some or all of the event's prev_events still have # partial state - ie, an event has an earlier stream_ordering than one # or more of its prev_events, so we de-partial-state it before its @@ -806,14 +832,39 @@ class FederationEventHandler: return try: - state_ids = await self._resolve_state_at_missing_prevs(origin, event) - # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does - # not return partial state - # https://github.com/matrix-org/synapse/issues/13002 + try: + state_ids, partial_state = await self._resolve_state_at_missing_prevs( + origin, event + ) + await self._process_received_pdu( + origin, + event, + state_ids=state_ids, + partial_state=partial_state, + backfilled=backfilled, + ) + except PartialStateConflictError: + # The room was un-partial stated while we were processing the event. + # Try once more, with full state this time. + state_ids, partial_state = await self._resolve_state_at_missing_prevs( + origin, event + ) - await self._process_received_pdu( - origin, event, state_ids=state_ids, backfilled=backfilled - ) + # We ought to have full state now, barring some unlikely race where we left and + # rejoned the room in the background. + if state_ids is not None and partial_state: + raise AssertionError( + f"Event {event.event_id} still has a partial resolved state " + f"after room {event.room_id} was un-partial stated" + ) + + await self._process_received_pdu( + origin, + event, + state_ids=state_ids, + partial_state=partial_state, + backfilled=backfilled, + ) except FederationError as e: if e.code == 403: logger.warning("Pulled event %s failed history check.", event_id) @@ -822,7 +873,7 @@ class FederationEventHandler: async def _resolve_state_at_missing_prevs( self, dest: str, event: EventBase - ) -> Optional[StateMap[str]]: + ) -> Tuple[Optional[StateMap[str]], Optional[bool]]: """Calculate the state at an event with missing prev_events. This is used when we have pulled a batch of events from a remote server, and @@ -849,8 +900,10 @@ class FederationEventHandler: event: an event to check for missing prevs. Returns: - if we already had all the prev events, `None`. Otherwise, returns - the event ids of the state at `event`. + if we already had all the prev events, `None, None`. Otherwise, returns a + tuple containing: + * the event ids of the state at `event`. + * a boolean indicating whether the state may be partial. Raises: FederationError if we fail to get the state from the remote server after any @@ -864,7 +917,7 @@ class FederationEventHandler: missing_prevs = prevs - seen if not missing_prevs: - return None + return None, None logger.info( "Event %s is missing prev_events %s: calculating state for a " @@ -876,9 +929,15 @@ class FederationEventHandler: # resolve them to find the correct state at the current event. try: + # Determine whether we may be about to retrieve partial state + # Events may be un-partial stated right after we compute the partial state + # flag, but that's okay, as long as the flag errs on the conservative side. 
+ partial_state_flags = await self._store.get_partial_state_events(seen) + partial_state = any(partial_state_flags.values()) + # Get the state of the events we know about ours = await self._state_storage_controller.get_state_groups_ids( - room_id, seen + room_id, seen, await_full_state=False ) # state_maps is a list of mappings from (type, state_key) to event_id @@ -924,7 +983,7 @@ class FederationEventHandler: "We can't get valid state history.", affected=event_id, ) - return state_map + return state_map, partial_state async def _get_state_ids_after_missing_prev_event( self, @@ -1094,6 +1153,7 @@ class FederationEventHandler: origin: str, event: EventBase, state_ids: Optional[StateMap[str]], + partial_state: Optional[bool], backfilled: bool = False, ) -> None: """Called when we have a new non-outlier event. @@ -1117,14 +1177,21 @@ class FederationEventHandler: state_ids: Normally None, but if we are handling a gap in the graph (ie, we are missing one or more prev_events), the resolved state at the - event. Must not be partial state. + event + + partial_state: + `True` if `state_ids` is partial and omits non-critical membership + events. + `False` if `state_ids` is the full state. + `None` if `state_ids` is not provided. In this case, the flag will be + calculated based on `event`'s prev events. backfilled: True if this is part of a historical batch of events (inhibits notification to clients, and validation of device keys.) PartialStateConflictError: if the room was un-partial stated in between computing the state at the event and persisting it. The caller should retry - exactly once in this case. Will never be raised if `state_ids` is provided. + exactly once in this case. """ logger.debug("Processing event: %s", event) assert not event.internal_metadata.outlier @@ -1132,6 +1199,7 @@ class FederationEventHandler: context = await self._state_handler.compute_event_context( event, state_ids_before_event=state_ids, + partial_state=partial_state, ) try: await self._check_event_auth(origin, event, context) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index bd7baef051..e0bcc40b93 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1135,6 +1135,10 @@ class EventCreationHandler: context = await self.state.compute_event_context( event, state_ids_before_event=state_map_for_event, + # TODO(faster_joins): check how MSC2716 works and whether we can have + # partial state here + # https://github.com/matrix-org/synapse/issues/13003 + partial_state=False, ) else: context = await self.state.compute_event_context(event) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 87ccd52f0a..69834de0de 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -255,7 +255,7 @@ class StateHandler: self, event: EventBase, state_ids_before_event: Optional[StateMap[str]] = None, - partial_state: bool = False, + partial_state: Optional[bool] = None, ) -> EventContext: """Build an EventContext structure for a non-outlier event. @@ -270,8 +270,12 @@ class StateHandler: it can't be calculated from existing events. This is normally only specified when receiving an event from federation where we don't have the prev events, e.g. when backfilling. - partial_state: True if `state_ids_before_event` is partial and omits - non-critical membership events + partial_state: + `True` if `state_ids_before_event` is partial and omits non-critical + membership events. + `False` if `state_ids_before_event` is the full state. 
+ `None` when `state_ids_before_event` is not provided. In this case, the + flag will be calculated based on `event`'s prev events. Returns: The event context. """ @@ -298,12 +302,14 @@ class StateHandler: ) ) + # the partial_state flag must be provided + assert partial_state is not None else: # otherwise, we'll need to resolve the state across the prev_events. # partial_state should not be set explicitly in this case: # we work it out dynamically - assert not partial_state + assert partial_state is None # if any of the prev-events have partial state, so do we. # (This is slightly racy - the prev-events might get fixed up before we use @@ -313,13 +319,13 @@ class StateHandler: incomplete_prev_events = await self.store.get_partial_state_events( prev_event_ids ) - if any(incomplete_prev_events.values()): + partial_state = any(incomplete_prev_events.values()) + if partial_state: logger.debug( "New/incoming event %s refers to prev_events %s with partial state", event.event_id, [k for (k, v) in incomplete_prev_events.items() if v], ) - partial_state = True logger.debug("calling resolve_state_groups from compute_event_context") # we've already taken into account partial state, so no need to wait for diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index e08f956e6e..20805c94fa 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -82,13 +82,15 @@ class StateStorageController: return state_group_delta.prev_group, state_group_delta.delta_ids async def get_state_groups_ids( - self, _room_id: str, event_ids: Collection[str] + self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True ) -> Dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events Args: _room_id: id of the room for these events event_ids: ids of the events + await_full_state: if `True`, will block if we do not yet have complete + state at these events. 
Returns: dict of state_group_id -> (dict of (type, state_key) -> event id) @@ -100,7 +102,9 @@ class StateStorageController: if not event_ids: return {} - event_to_groups = await self.get_state_group_for_events(event_ids) + event_to_groups = await self.get_state_group_for_events( + event_ids, await_full_state=await_full_state + ) groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups(groups) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 8a0bb91f40..fb06e5e812 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -287,6 +287,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): state_ids={ (e.type, e.state_key): e.event_id for e in current_state }, + partial_state=False, ) ) diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 2ff88e64a5..3ce4f35cb7 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -70,7 +70,11 @@ class ExtremPruneTestCase(HomeserverTestCase): def persist_event(self, event, state=None): """Persist the event, with optional state""" context = self.get_success( - self.state.compute_event_context(event, state_ids_before_event=state) + self.state.compute_event_context( + event, + state_ids_before_event=state, + partial_state=None if state is None else False, + ) ) self.get_success(self._persistence.persist_event(event, context)) @@ -148,6 +152,7 @@ class ExtremPruneTestCase(HomeserverTestCase): self.state.compute_event_context( remote_event_2, state_ids_before_event=state_before_gap, + partial_state=False, ) ) diff --git a/tests/test_state.py b/tests/test_state.py index bafd6d1750..504530b49a 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -462,6 +462,7 @@ class StateTestCase(unittest.TestCase): state_ids_before_event={ (e.type, e.state_key): e.event_id for e in old_state }, + partial_state=False, ) ) @@ -492,6 +493,7 @@ class StateTestCase(unittest.TestCase): state_ids_before_event={ (e.type, e.state_key): e.event_id for e in old_state }, + partial_state=False, ) ) -- cgit 1.5.1 From 39be5bc550f2e882b4754c7d98c906d9bde8b649 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 27 Jul 2022 10:37:50 +0000 Subject: Make minor clarifications to the error messages given when we fail to join a room via any server. (#13160) --- changelog.d/13160.misc | 1 + synapse/federation/federation_client.py | 8 +++++++- synapse/handlers/room_member.py | 6 +++++- tests/rest/admin/test_room.py | 5 ++++- 4 files changed, 17 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13160.misc (limited to 'tests') diff --git a/changelog.d/13160.misc b/changelog.d/13160.misc new file mode 100644 index 0000000000..36ff50c2a6 --- /dev/null +++ b/changelog.d/13160.misc @@ -0,0 +1 @@ +Make minor clarifications to the error messages given when we fail to join a room via any server. \ No newline at end of file diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 02276ed995..6a8d76529b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -725,6 +725,12 @@ class FederationClient(FederationBase): if failover_errcodes is None: failover_errcodes = () + if not destinations: + # Give a bit of a clearer message if no servers were specified at all. + raise SynapseError( + 502, f"Failed to {description} via any server: No servers specified." 
+ ) + for destination in destinations: if destination == self.server_name: continue @@ -774,7 +780,7 @@ class FederationClient(FederationBase): "Failed to %s via %s", description, destination, exc_info=True ) - raise SynapseError(502, "Failed to %s via any server" % (description,)) + raise SynapseError(502, f"Failed to {description} via any server") async def make_membership_event( self, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 30b4cb23df..520c52e013 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1679,7 +1679,11 @@ class RoomMemberMasterHandler(RoomMemberHandler): ] if len(remote_room_hosts) == 0: - raise SynapseError(404, "No known servers") + raise SynapseError( + 404, + "Can't join remote room because no servers " + "that are in the room have been provided.", + ) check_complexity = self.hs.config.server.limit_remote_rooms.enabled if ( diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 2526136ff8..623883b53c 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1873,7 +1873,10 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase): ) self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) - self.assertEqual("No known servers", channel.json_body["error"]) + self.assertEqual( + "Can't join remote room because no servers that are in the room have been provided.", + channel.json_body["error"], + ) def test_room_is_not_valid(self) -> None: """ -- cgit 1.5.1 From 502f075e96b458a183952ae2be402f00b28af299 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 27 Jul 2022 13:44:40 +0100 Subject: Implement MSC3848: Introduce errcodes for specific event sending failures (#13343) Implements MSC3848 --- changelog.d/13343.feature | 1 + synapse/api/auth.py | 11 +++-- synapse/api/errors.py | 58 ++++++++++++++++++++++----- synapse/config/experimental.py | 3 ++ synapse/event_auth.py | 62 ++++++++++++++++++++++++----- synapse/federation/federation_server.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/message.py | 13 +++++- synapse/handlers/room_summary.py | 5 ++- synapse/http/server.py | 18 +++++++-- tests/rest/client/test_third_party_rules.py | 5 ++- 11 files changed, 144 insertions(+), 36 deletions(-) create mode 100644 changelog.d/13343.feature (limited to 'tests') diff --git a/changelog.d/13343.feature b/changelog.d/13343.feature new file mode 100644 index 0000000000..c151251e54 --- /dev/null +++ b/changelog.d/13343.feature @@ -0,0 +1 @@ +Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in MSC3848. 
\ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 6e6eaf3805..82e6475ef5 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( Codes, InvalidClientTokenError, MissingClientTokenError, + UnstableSpecAuthError, ) from synapse.appservice import ApplicationService from synapse.http import get_request_user_agent @@ -106,8 +107,11 @@ class Auth: forgot = await self.store.did_forget(user_id, room_id) if not forgot: return membership, member_event_id - - raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) + raise UnstableSpecAuthError( + 403, + "User %s not in room %s" % (user_id, room_id), + errcode=Codes.NOT_JOINED, + ) async def get_user_by_req( self, @@ -600,8 +604,9 @@ class Auth: == HistoryVisibility.WORLD_READABLE ): return Membership.JOIN, None - raise AuthError( + raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" % (user_id, room_id), + errcode=Codes.NOT_JOINED, ) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 1c74e131f2..e6dea89c6d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -26,6 +26,7 @@ from twisted.web import http from synapse.util import json_decoder if typing.TYPE_CHECKING: + from synapse.config.homeserver import HomeServerConfig from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -80,6 +81,12 @@ class Codes(str, Enum): INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" + # Part of MSC3848 + # https://github.com/matrix-org/matrix-spec-proposals/pull/3848 + ALREADY_JOINED = "ORG.MATRIX.MSC3848.ALREADY_JOINED" + NOT_JOINED = "ORG.MATRIX.MSC3848.NOT_JOINED" + INSUFFICIENT_POWER = "ORG.MATRIX.MSC3848.INSUFFICIENT_POWER" + # The account has been suspended on the server. # By opposition to `USER_DEACTIVATED`, this is a reversible measure # that can possibly be appealed and reverted. @@ -167,7 +174,7 @@ class SynapseError(CodeMessageException): else: self._additional_fields = dict(additional_fields) - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, **self._additional_fields) @@ -213,7 +220,7 @@ class ConsentNotGivenError(SynapseError): ) self._consent_uri = consent_uri - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri) @@ -307,6 +314,37 @@ class AuthError(SynapseError): super().__init__(code, msg, errcode, additional_fields) +class UnstableSpecAuthError(AuthError): + """An error raised when a new error code is being proposed to replace a previous one. + This error will return a "org.matrix.unstable.errcode" property with the new error code, + with the previous error code still being defined in the "errcode" property. + + This error will include `org.matrix.msc3848.unstable.errcode` in the C-S error body. 
+ """ + + def __init__( + self, + code: int, + msg: str, + errcode: str, + previous_errcode: str = Codes.FORBIDDEN, + additional_fields: Optional[dict] = None, + ): + self.previous_errcode = previous_errcode + super().__init__(code, msg, errcode, additional_fields) + + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": + fields = {} + if config is not None and config.experimental.msc3848_enabled: + fields["org.matrix.msc3848.unstable.errcode"] = self.errcode + return cs_error( + self.msg, + self.previous_errcode, + **fields, + **self._additional_fields, + ) + + class InvalidClientCredentialsError(SynapseError): """An error raised when there was a problem with the authorisation credentials in a client request. @@ -338,8 +376,8 @@ class InvalidClientTokenError(InvalidClientCredentialsError): super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN") self._soft_logout = soft_logout - def error_dict(self) -> "JsonDict": - d = super().error_dict() + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": + d = super().error_dict(config) d["soft_logout"] = self._soft_logout return d @@ -362,7 +400,7 @@ class ResourceLimitError(SynapseError): self.limit_type = limit_type super().__init__(code, msg, errcode=errcode) - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error( self.msg, self.errcode, @@ -397,7 +435,7 @@ class InvalidCaptchaError(SynapseError): super().__init__(code, msg, errcode) self.error_url = error_url - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, error_url=self.error_url) @@ -414,7 +452,7 @@ class LimitExceededError(SynapseError): super().__init__(code, msg, errcode) self.retry_after_ms = retry_after_ms - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms) @@ -429,7 +467,7 @@ class RoomKeysVersionError(SynapseError): super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION) self.current_version = current_version - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, current_version=self.current_version) @@ -469,7 +507,7 @@ class IncompatibleRoomVersionError(SynapseError): self._room_version = room_version - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, room_version=self._room_version) @@ -515,7 +553,7 @@ class UnredactedContentDeletedError(SynapseError): ) self.content_keep_ms = content_keep_ms - def error_dict(self) -> "JsonDict": + def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": extra = {} if self.content_keep_ms is not None: extra = {"fi.mau.msc2815.content_keep_ms": self.content_keep_ms} diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index ee443cea00..1902222d7b 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -90,3 +90,6 @@ class ExperimentalConfig(Config): # MSC3827: Filtering of /publicRooms by room type self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False) + + # MSC3848: Introduce errcodes for specific event sending failures + self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False) 
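
A minimal sketch (not part of the patch) of how the new error class is expected to
serialise once the msc3848_enabled flag above is turned on. Here `hs_config` stands in
for any HomeServerConfig with experimental.msc3848_enabled set to True, and the exact
JSON shape is an assumption based on how cs_error assembles error bodies:

    from synapse.api.errors import Codes, UnstableSpecAuthError

    err = UnstableSpecAuthError(
        403,
        "You don't have permission to invite users",
        errcode=Codes.INSUFFICIENT_POWER,
    )

    # The stable M_FORBIDDEN code stays in "errcode"; the proposed code is only
    # surfaced under the unstable key when the experimental flag is enabled.
    body = err.error_dict(hs_config)
    # body should look roughly like:
    # {
    #     "errcode": "M_FORBIDDEN",
    #     "error": "You don't have permission to invite users",
    #     "org.matrix.msc3848.unstable.errcode": "ORG.MATRIX.MSC3848.INSUFFICIENT_POWER",
    # }
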
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 965cb265da..389b0c5d53 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -30,7 +30,13 @@ from synapse.api.constants import ( JoinRules, Membership, ) -from synapse.api.errors import AuthError, EventSizeError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + EventSizeError, + SynapseError, + UnstableSpecAuthError, +) from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, @@ -291,7 +297,11 @@ def check_state_dependent_auth_rules( invite_level = get_named_level(auth_dict, "invite", 0) if user_level < invite_level: - raise AuthError(403, "You don't have permission to invite users") + raise UnstableSpecAuthError( + 403, + "You don't have permission to invite users", + errcode=Codes.INSUFFICIENT_POWER, + ) else: logger.debug("Allowing! %s", event) return @@ -474,7 +484,11 @@ def _is_membership_change_allowed( return if not caller_in_room: # caller isn't joined - raise AuthError(403, "%s not in room %s." % (event.user_id, event.room_id)) + raise UnstableSpecAuthError( + 403, + "%s not in room %s." % (event.user_id, event.room_id), + errcode=Codes.NOT_JOINED, + ) if Membership.INVITE == membership: # TODO (erikj): We should probably handle this more intelligently @@ -484,10 +498,18 @@ def _is_membership_change_allowed( if target_banned: raise AuthError(403, "%s is banned from the room" % (target_user_id,)) elif target_in_room: # the target is already in the room. - raise AuthError(403, "%s is already in the room." % target_user_id) + raise UnstableSpecAuthError( + 403, + "%s is already in the room." % target_user_id, + errcode=Codes.ALREADY_JOINED, + ) else: if user_level < invite_level: - raise AuthError(403, "You don't have permission to invite users") + raise UnstableSpecAuthError( + 403, + "You don't have permission to invite users", + errcode=Codes.INSUFFICIENT_POWER, + ) elif Membership.JOIN == membership: # Joins are valid iff caller == target and: # * They are not banned. @@ -549,15 +571,27 @@ def _is_membership_change_allowed( elif Membership.LEAVE == membership: # TODO (erikj): Implement kicks. if target_banned and user_level < ban_level: - raise AuthError(403, "You cannot unban user %s." % (target_user_id,)) + raise UnstableSpecAuthError( + 403, + "You cannot unban user %s." % (target_user_id,), + errcode=Codes.INSUFFICIENT_POWER, + ) elif target_user_id != event.user_id: kick_level = get_named_level(auth_events, "kick", 50) if user_level < kick_level or user_level <= target_level: - raise AuthError(403, "You cannot kick user %s." % target_user_id) + raise UnstableSpecAuthError( + 403, + "You cannot kick user %s." 
% target_user_id, + errcode=Codes.INSUFFICIENT_POWER, + ) elif Membership.BAN == membership: if user_level < ban_level or user_level <= target_level: - raise AuthError(403, "You don't have permission to ban") + raise UnstableSpecAuthError( + 403, + "You don't have permission to ban", + errcode=Codes.INSUFFICIENT_POWER, + ) elif room_version.msc2403_knocking and Membership.KNOCK == membership: if join_rule != JoinRules.KNOCK and ( not room_version.msc3787_knock_restricted_join_rule @@ -567,7 +601,11 @@ def _is_membership_change_allowed( elif target_user_id != event.user_id: raise AuthError(403, "You cannot knock for other users") elif target_in_room: - raise AuthError(403, "You cannot knock on a room you are already in") + raise UnstableSpecAuthError( + 403, + "You cannot knock on a room you are already in", + errcode=Codes.ALREADY_JOINED, + ) elif caller_invited: raise AuthError(403, "You are already invited to this room") elif target_banned: @@ -638,10 +676,11 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b user_level = get_user_power_level(event.user_id, auth_events) if user_level < send_level: - raise AuthError( + raise UnstableSpecAuthError( 403, "You don't have permission to post that to the room. " + "user_level (%d) < send_level (%d)" % (user_level, send_level), + errcode=Codes.INSUFFICIENT_POWER, ) # Check state_key @@ -716,9 +755,10 @@ def check_historical( historical_level = get_named_level(auth_events, "historical", 100) if user_level < historical_level: - raise AuthError( + raise UnstableSpecAuthError( 403, 'You don\'t have permission to send send historical related events ("insertion", "batch", and "marker")', + errcode=Codes.INSUFFICIENT_POWER, ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ae550d3f4d..1d60137411 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -469,7 +469,7 @@ class FederationServer(FederationBase): ) for pdu in pdus_by_room[room_id]: event_id = pdu.event_id - pdu_results[event_id] = e.error_dict() + pdu_results[event_id] = e.error_dict(self.hs.config) return for pdu in pdus_by_room[room_id]: diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 3d83236b0c..bfa5535044 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -565,7 +565,7 @@ class AuthHandler: except LoginError as e: # this step failed. Merge the error dict into the response # so that the client can have another go. - errordict = e.error_dict() + errordict = e.error_dict(self.hs.config) creds = await self.store.get_completed_ui_auth_stages(session.session_id) for f in flows: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e0bcc40b93..e85b540451 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -41,6 +41,7 @@ from synapse.api.errors import ( NotFoundError, ShadowBanError, SynapseError, + UnstableSpecAuthError, UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -149,7 +150,11 @@ class MessageHandler: "Attempted to retrieve data from a room for a user that has never been in it. " "This should not have happened." 
) - raise SynapseError(403, "User not in room", errcode=Codes.FORBIDDEN) + raise UnstableSpecAuthError( + 403, + "User not in room", + errcode=Codes.NOT_JOINED, + ) return data @@ -334,7 +339,11 @@ class MessageHandler: break else: # Loop fell through, AS has no interested users in room - raise AuthError(403, "Appservice not in room") + raise UnstableSpecAuthError( + 403, + "Appservice not in room", + errcode=Codes.NOT_JOINED, + ) return { user_id: { diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 85811b5bde..ebd445adca 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -28,11 +28,11 @@ from synapse.api.constants import ( RoomTypes, ) from synapse.api.errors import ( - AuthError, Codes, NotFoundError, StoreError, SynapseError, + UnstableSpecAuthError, UnsupportedRoomVersionError, ) from synapse.api.ratelimiting import Ratelimiter @@ -175,10 +175,11 @@ class RoomSummaryHandler: # First of all, check that the room is accessible. if not await self._is_local_room_accessible(requested_room_id, requester): - raise AuthError( + raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" % (requester, requested_room_id), + errcode=Codes.NOT_JOINED, ) # If this is continuing a previous session, pull the persisted data. diff --git a/synapse/http/server.py b/synapse/http/server.py index cf2d6f904b..19f42159b8 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -58,6 +58,7 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) +from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet @@ -155,15 +156,16 @@ def is_method_cancellable(method: Callable[..., Any]) -> bool: return getattr(method, "cancellable", False) -def return_json_error(f: failure.Failure, request: SynapseRequest) -> None: +def return_json_error( + f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig] +) -> None: """Sends a JSON error response to clients.""" if f.check(SynapseError): # mypy doesn't understand that f.check asserts the type. 
exc: SynapseError = f.value # type: ignore error_code = exc.code - error_dict = exc.error_dict() - + error_dict = exc.error_dict(config) logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) elif f.check(CancelledError): error_code = HTTP_STATUS_REQUEST_CANCELLED @@ -450,7 +452,7 @@ class DirectServeJsonResource(_AsyncResource): request: SynapseRequest, ) -> None: """Implements _AsyncResource._send_error_response""" - return_json_error(f, request) + return_json_error(f, request, None) @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -575,6 +577,14 @@ class JsonResource(DirectServeJsonResource): return callback_return + def _send_error_response( + self, + f: failure.Failure, + request: SynapseRequest, + ) -> None: + """Implements _AsyncResource._send_error_response""" + return_json_error(f, request, self.hs.config) + class DirectServeHtmlResource(_AsyncResource): """A resource that will call `self._async_on_` on new requests, diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 9a48e9286f..18a7195409 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -20,6 +20,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, LoginType, Membership from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion +from synapse.config.homeserver import HomeServerConfig from synapse.events import EventBase from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin @@ -185,12 +186,12 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ class NastyHackException(SynapseError): - def error_dict(self) -> JsonDict: + def error_dict(self, config: Optional[HomeServerConfig]) -> JsonDict: """ This overrides SynapseError's `error_dict` to nastily inject JSON into the error response. """ - result = super().error_dict() + result = super().error_dict(config) result["nasty"] = "very" return result -- cgit 1.5.1 From 922b771337f6d14a556fa761c783748f698e924b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 27 Jul 2022 13:18:41 -0400 Subject: Add missing type hints for tests.unittest. (#13397) --- changelog.d/13397.misc | 1 + tests/handlers/test_directory.py | 12 +----- tests/rest/client/test_relations.py | 6 ++- tests/rest/client/test_rooms.py | 2 +- tests/server.py | 11 ++++- tests/unittest.py | 86 +++++++++++++++++++++---------------- 6 files changed, 66 insertions(+), 52 deletions(-) create mode 100644 changelog.d/13397.misc (limited to 'tests') diff --git a/changelog.d/13397.misc b/changelog.d/13397.misc new file mode 100644 index 0000000000..8dc610d9e2 --- /dev/null +++ b/changelog.d/13397.misc @@ -0,0 +1 @@ +Adding missing type hints to tests. 
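
A rough illustration (not from the patch) of why the json_list accessor this commit adds
to FakeChannel (see the tests/server.py hunk further down) is useful in the typed tests:
some endpoints, such as the room state query exercised in test_rooms.py, return a JSON
array rather than an object, and asserting the parsed shape up front lets mypy narrow the
type. `channel` is assumed to be the FakeChannel returned by an earlier self.make_request
call:

    # JSON-array response, e.g. the list of state events for a room.
    state_events = channel.json_list
    event_types = [state_event["type"] for state_event in state_events]

    # JSON-object response, which most endpoints return.
    body = channel.json_body
    errcode = body.get("errcode")
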
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 53d49ca896..3b72c4c9d0 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -481,17 +481,13 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): return config - def prepare( - self, reactor: MemoryReactor, clock: Clock, hs: HomeServer - ) -> HomeServer: + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.allowed_user_id = self.register_user(self.allowed_localpart, "pass") self.allowed_access_token = self.login(self.allowed_localpart, "pass") self.denied_user_id = self.register_user("denied", "pass") self.denied_access_token = self.login("denied", "pass") - return hs - def test_denied_without_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, @@ -575,9 +571,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def prepare( - self, reactor: MemoryReactor, clock: Clock, hs: HomeServer - ) -> HomeServer: + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -588,8 +582,6 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): self.room_list_handler = hs.get_room_list_handler() self.directory_handler = hs.get_directory_handler() - return hs - def test_disabling_room_list(self) -> None: self.room_list_handler.enable_room_list_search = True self.directory_handler.enable_room_list_search = True diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index ad03eee17b..d589f07314 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1060,6 +1060,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): participated, bundled_aggregations.get("current_user_participated") ) # The latest thread event has some fields that don't matter. + self.assertIn("latest_event", bundled_aggregations) self.assert_dict( { "content": { @@ -1072,7 +1073,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): "sender": self.user2_id, "type": "m.room.test", }, - bundled_aggregations.get("latest_event"), + bundled_aggregations["latest_event"], ) return assert_thread @@ -1112,6 +1113,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): self.assertEqual(2, bundled_aggregations.get("count")) self.assertTrue(bundled_aggregations.get("current_user_participated")) # The latest thread event has some fields that don't matter. + self.assertIn("latest_event", bundled_aggregations) self.assert_dict( { "content": { @@ -1124,7 +1126,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): "sender": self.user_id, "type": "m.room.test", }, - bundled_aggregations.get("latest_event"), + bundled_aggregations["latest_event"], ) # Check the unsigned field on the latest event. 
self.assert_dict( diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index c45cb32090..2272d55d84 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -496,7 +496,7 @@ class RoomStateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertCountEqual( - [state_event["type"] for state_event in channel.json_body], + [state_event["type"] for state_event in channel.json_list], { "m.room.create", "m.room.power_levels", diff --git a/tests/server.py b/tests/server.py index df3f1564c9..9689e6a0cd 100644 --- a/tests/server.py +++ b/tests/server.py @@ -25,6 +25,7 @@ from typing import ( Callable, Dict, Iterable, + List, MutableMapping, Optional, Tuple, @@ -121,7 +122,15 @@ class FakeChannel: @property def json_body(self) -> JsonDict: - return json.loads(self.text_body) + body = json.loads(self.text_body) + assert isinstance(body, dict) + return body + + @property + def json_list(self) -> List[JsonDict]: + body = json.loads(self.text_body) + assert isinstance(body, list) + return body @property def text_body(self) -> str: diff --git a/tests/unittest.py b/tests/unittest.py index 66ce92f4a6..bec4a3d023 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -28,6 +28,7 @@ from typing import ( Generic, Iterable, List, + NoReturn, Optional, Tuple, Type, @@ -39,7 +40,7 @@ from unittest.mock import Mock, patch import canonicaljson import signedjson.key import unpaddedbase64 -from typing_extensions import Protocol +from typing_extensions import Concatenate, ParamSpec, Protocol from twisted.internet.defer import Deferred, ensureDeferred from twisted.python.failure import Failure @@ -67,7 +68,7 @@ from synapse.logging.context import ( from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer from synapse.storage.keys import FetchKeyResult -from synapse.types import JsonDict, UserID, create_requester +from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree @@ -88,6 +89,10 @@ setup_logging() TV = TypeVar("TV") _ExcType = TypeVar("_ExcType", bound=BaseException, covariant=True) +P = ParamSpec("P") +R = TypeVar("R") +S = TypeVar("S") + class _TypedFailure(Generic[_ExcType], Protocol): """Extension to twisted.Failure, where the 'value' has a certain type.""" @@ -97,7 +102,7 @@ class _TypedFailure(Generic[_ExcType], Protocol): ... -def around(target): +def around(target: TV) -> Callable[[Callable[Concatenate[S, P], R]], None]: """A CLOS-style 'around' modifier, which wraps the original method of the given instance with another piece of code. @@ -106,11 +111,11 @@ def around(target): return orig(*args, **kwargs) """ - def _around(code): + def _around(code: Callable[Concatenate[S, P], R]) -> None: name = code.__name__ orig = getattr(target, name) - def new(*args, **kwargs): + def new(*args: P.args, **kwargs: P.kwargs) -> R: return code(orig, *args, **kwargs) setattr(target, name, new) @@ -131,7 +136,7 @@ class TestCase(unittest.TestCase): level = getattr(method, "loglevel", getattr(self, "loglevel", None)) @around(self) - def setUp(orig): + def setUp(orig: Callable[[], R]) -> R: # if we're not starting in the sentinel logcontext, then to be honest # all future bets are off. 
if current_context(): @@ -144,7 +149,7 @@ class TestCase(unittest.TestCase): if level is not None and old_level != level: @around(self) - def tearDown(orig): + def tearDown(orig: Callable[[], R]) -> R: ret = orig() logging.getLogger().setLevel(old_level) return ret @@ -158,7 +163,7 @@ class TestCase(unittest.TestCase): return orig() @around(self) - def tearDown(orig): + def tearDown(orig: Callable[[], R]) -> R: ret = orig() # force a GC to workaround problems with deferreds leaking logcontexts when # they are GCed (see the logcontext docs) @@ -167,7 +172,7 @@ class TestCase(unittest.TestCase): return ret - def assertObjectHasAttributes(self, attrs, obj): + def assertObjectHasAttributes(self, attrs: Dict[str, object], obj: object) -> None: """Asserts that the given object has each of the attributes given, and that the value of each matches according to assertEqual.""" for key in attrs.keys(): @@ -178,12 +183,12 @@ class TestCase(unittest.TestCase): except AssertionError as e: raise (type(e))(f"Assert error for '.{key}':") from e - def assert_dict(self, required, actual): + def assert_dict(self, required: dict, actual: dict) -> None: """Does a partial assert of a dict. Args: - required (dict): The keys and value which MUST be in 'actual'. - actual (dict): The test result. Extra keys will not be checked. + required: The keys and value which MUST be in 'actual'. + actual: The test result. Extra keys will not be checked. """ for key in required: self.assertEqual( @@ -191,31 +196,31 @@ class TestCase(unittest.TestCase): ) -def DEBUG(target): +def DEBUG(target: TV) -> TV: """A decorator to set the .loglevel attribute to logging.DEBUG. Can apply to either a TestCase or an individual test method.""" - target.loglevel = logging.DEBUG + target.loglevel = logging.DEBUG # type: ignore[attr-defined] return target -def INFO(target): +def INFO(target: TV) -> TV: """A decorator to set the .loglevel attribute to logging.INFO. Can apply to either a TestCase or an individual test method.""" - target.loglevel = logging.INFO + target.loglevel = logging.INFO # type: ignore[attr-defined] return target -def logcontext_clean(target): +def logcontext_clean(target: TV) -> TV: """A decorator which marks the TestCase or method as 'logcontext_clean' ... 
ie, any logcontext errors should cause a test failure """ - def logcontext_error(msg): + def logcontext_error(msg: str) -> NoReturn: raise AssertionError("logcontext error: %s" % (msg)) patcher = patch("synapse.logging.context.logcontext_error", new=logcontext_error) - return patcher(target) + return patcher(target) # type: ignore[call-overload] class HomeserverTestCase(TestCase): @@ -255,7 +260,7 @@ class HomeserverTestCase(TestCase): method = getattr(self, methodName) self._extra_config = getattr(method, "_extra_config", None) - def setUp(self): + def setUp(self) -> None: """ Set up the TestCase by calling the homeserver constructor, optionally hijacking the authentication system to return a fixed user, and then @@ -306,7 +311,9 @@ class HomeserverTestCase(TestCase): ) ) - async def get_user_by_access_token(token=None, allow_guest=False): + async def get_user_by_access_token( + token: Optional[str] = None, allow_guest: bool = False + ) -> JsonDict: assert self.helper.auth_user_id is not None return { "user": UserID.from_string(self.helper.auth_user_id), @@ -314,7 +321,11 @@ class HomeserverTestCase(TestCase): "is_guest": False, } - async def get_user_by_req(request, allow_guest=False): + async def get_user_by_req( + request: SynapseRequest, + allow_guest: bool = False, + allow_expired: bool = False, + ) -> Requester: assert self.helper.auth_user_id is not None return create_requester( UserID.from_string(self.helper.auth_user_id), @@ -339,11 +350,11 @@ class HomeserverTestCase(TestCase): if hasattr(self, "prepare"): self.prepare(self.reactor, self.clock, self.hs) - def tearDown(self): + def tearDown(self) -> None: # Reset to not use frozen dicts. events.USE_FROZEN_DICTS = False - def wait_on_thread(self, deferred, timeout=10): + def wait_on_thread(self, deferred: Deferred, timeout: int = 10) -> None: """ Wait until a Deferred is done, where it's waiting on a real thread. """ @@ -374,7 +385,7 @@ class HomeserverTestCase(TestCase): clock (synapse.util.Clock): The Clock, associated with the reactor. Returns: - A homeserver (synapse.server.HomeServer) suitable for testing. + A homeserver suitable for testing. Function to be overridden in subclasses. """ @@ -408,7 +419,7 @@ class HomeserverTestCase(TestCase): "/_synapse/admin": servlet_resource, } - def default_config(self): + def default_config(self) -> JsonDict: """ Get a default HomeServer config dict. """ @@ -421,7 +432,9 @@ class HomeserverTestCase(TestCase): return config - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: """ Prepare for the test. 
This involves things like mocking out parts of the homeserver, or building test data common across the whole test @@ -519,7 +532,7 @@ class HomeserverTestCase(TestCase): config_obj.parse_config_dict(config, "", "") kwargs["config"] = config_obj - async def run_bg_updates(): + async def run_bg_updates() -> None: with LoggingContext("run_bg_updates"): self.get_success(stor.db_pool.updates.run_background_updates(False)) @@ -538,11 +551,7 @@ class HomeserverTestCase(TestCase): """ self.reactor.pump([by] * 100) - def get_success( - self, - d: Awaitable[TV], - by: float = 0.0, - ) -> TV: + def get_success(self, d: Awaitable[TV], by: float = 0.0) -> TV: deferred: Deferred[TV] = ensureDeferred(d) # type: ignore[arg-type] self.pump(by=by) return self.successResultOf(deferred) @@ -755,7 +764,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase): OTHER_SERVER_NAME = "other.example.com" OTHER_SERVER_SIGNATURE_KEY = signedjson.key.generate_signing_key("test") - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) # poke the other server's signing key into the key store, so that we don't @@ -879,7 +888,7 @@ def _auth_header_for_request( ) -def override_config(extra_config): +def override_config(extra_config: JsonDict) -> Callable[[TV], TV]: """A decorator which can be applied to test functions to give additional HS config For use @@ -892,12 +901,13 @@ def override_config(extra_config): ... Args: - extra_config(dict): Additional config settings to be merged into the default + extra_config: Additional config settings to be merged into the default config dict before instantiating the test homeserver. """ - def decorator(func): - func._extra_config = extra_config + def decorator(func: TV) -> TV: + # This attribute is being defined. + func._extra_config = extra_config # type: ignore[attr-defined] return func return decorator -- cgit 1.5.1 From 583f22780f44157c50bc2dc5c242e88cc18c7886 Mon Sep 17 00:00:00 2001 From: Šimon Brandner Date: Wed, 27 Jul 2022 20:46:57 +0200 Subject: Use stable prefixes for MSC3827: filtering of `/publicRooms` by room type (#13370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Šimon Brandner --- changelog.d/13370.feature | 1 + synapse/api/constants.py | 2 +- synapse/config/experimental.py | 3 --- synapse/handlers/room_list.py | 2 +- synapse/rest/client/versions.py | 4 ++-- synapse/storage/databases/main/room.py | 2 +- tests/rest/client/test_rooms.py | 5 ++--- 7 files changed, 8 insertions(+), 11 deletions(-) create mode 100644 changelog.d/13370.feature (limited to 'tests') diff --git a/changelog.d/13370.feature b/changelog.d/13370.feature new file mode 100644 index 0000000000..3a49bc2778 --- /dev/null +++ b/changelog.d/13370.feature @@ -0,0 +1 @@ +Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). 
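
For illustration only (not part of the patch): with the stable prefix, a client filtering
/publicRooms by room type no longer needs the org.matrix.msc3827. namespace on the filter
field. The request body below is a sketch based on MSC3827 and on the test helper used
further down; the exact shape of make_public_rooms_request is not shown here, so treat it
as an assumption:

    # Body a client might POST to /_matrix/client/v3/publicRooms after this change.
    search_filter = {
        "limit": 10,
        "filter": {
            # A null entry matches rooms that have no room type at all.
            "room_types": ["m.space", None],
        },
    }
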
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 2653764119..789859e69e 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -268,4 +268,4 @@ class PublicRoomsFilterFields: """ GENERIC_SEARCH_TERM: Final = "generic_search_term" - ROOM_TYPES: Final = "org.matrix.msc3827.room_types" + ROOM_TYPES: Final = "room_types" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1902222d7b..c2ecd977cd 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -88,8 +88,5 @@ class ExperimentalConfig(Config): # MSC3715: dir param on /relations. self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False) - # MSC3827: Filtering of /publicRooms by room type - self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False) - # MSC3848: Introduce errcodes for specific event sending failures self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 29868eb743..bb0bdb8e6f 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -182,7 +182,7 @@ class RoomListHandler: == HistoryVisibility.WORLD_READABLE, "guest_can_join": room["guest_access"] == "can_join", "join_rule": room["join_rules"], - "org.matrix.msc3827.room_type": room["room_type"], + "room_type": room["room_type"], } # Filter out Nones – rather omit the field altogether diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index f4f06563dd..0366986755 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -95,8 +95,8 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, # Supports receiving private read receipts as per MSC2285 "org.matrix.msc2285": self.config.experimental.msc2285_enabled, - # Supports filtering of /publicRooms by room type MSC3827 - "org.matrix.msc3827": self.config.experimental.msc3827_enabled, + # Supports filtering of /publicRooms by room type as per MSC3827 + "org.matrix.msc3827.stable": True, # Adds support for importing historical messages as per MSC2716 "org.matrix.msc2716": self.config.experimental.msc2716_enabled, # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index d6d485507b..0f1f0d11ea 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -207,7 +207,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _construct_room_type_where_clause( self, room_types: Union[List[Union[str, None]], None] ) -> Tuple[Union[str, None], List[str]]: - if not room_types or not self.config.experimental.msc3827_enabled: + if not room_types: return None, [] else: # We use None when we want get rooms without a type diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 2272d55d84..aa2f578441 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -2070,7 +2070,6 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): config = self.default_config() config["allow_public_rooms_without_auth"] = True - config["experimental_features"] = {"msc3827_enabled": True} self.hs = self.setup_test_homeserver(config=config) self.url = b"/_matrix/client/r0/publicRooms" @@ -2123,13 +2122,13 @@ class 
PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): chunk, count = self.make_public_rooms_request([None]) self.assertEqual(count, 1) - self.assertEqual(chunk[0].get("org.matrix.msc3827.room_type", None), None) + self.assertEqual(chunk[0].get("room_type", None), None) def test_returns_only_space_based_on_filter(self) -> None: chunk, count = self.make_public_rooms_request(["m.space"]) self.assertEqual(count, 1) - self.assertEqual(chunk[0].get("org.matrix.msc3827.room_type", None), "m.space") + self.assertEqual(chunk[0].get("room_type", None), "m.space") def test_returns_both_rooms_and_space_based_on_filter(self) -> None: chunk, count = self.make_public_rooms_request(["m.space", None]) -- cgit 1.5.1 From 98fb610cc043e4f6ba77f78aaecef6b646bf61d6 Mon Sep 17 00:00:00 2001 From: 3nprob <74199244+3nprob@users.noreply.github.com> Date: Fri, 29 Jul 2022 10:29:23 +0000 Subject: Revert "Drop support for delegating email validation (#13192)" (#13406) Reverts commit fa71bb18b527d1a3e2629b48640ea67fff2f8c59, and tweaks documentation. Signed-off-by: 3nprob --- changelog.d/13406.misc | 1 + docs/upgrade.md | 13 - docs/usage/configuration/config_documentation.md | 362 +++++++++++------------ synapse/app/homeserver.py | 3 +- synapse/config/emailconfig.py | 46 ++- synapse/config/registration.py | 14 +- synapse/handlers/identity.py | 56 +++- synapse/handlers/ui_auth/checkers.py | 21 +- synapse/rest/client/account.py | 106 ++++--- synapse/rest/client/register.py | 59 ++-- synapse/rest/synapse/client/password_reset.py | 8 +- tests/rest/client/test_register.py | 2 +- 12 files changed, 425 insertions(+), 266 deletions(-) create mode 100644 changelog.d/13406.misc (limited to 'tests') diff --git a/changelog.d/13406.misc b/changelog.d/13406.misc new file mode 100644 index 0000000000..f78e052e87 --- /dev/null +++ b/changelog.d/13406.misc @@ -0,0 +1 @@ +Warn instead of error when using `account_threepid_delegates.email`, which was deprecated in 1.64.0rc1. diff --git a/docs/upgrade.md b/docs/upgrade.md index fadb8e7ffb..73ed209975 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -91,19 +91,6 @@ process, for example: # Upgrading to v1.64.0 -## Delegation of email validation no longer supported - -As of this version, Synapse no longer allows the tasks of verifying email address -ownership, and password reset confirmation, to be delegated to an identity server. - -To continue to allow users to add email addresses to their homeserver accounts, -and perform password resets, make sure that Synapse is configured with a -working email server in the `email` configuration section (including, at a -minimum, a `notif_from` setting.) - -Specifying an `email` setting under `account_threepid_delegates` will now cause -an error at startup. - ## Changes to the event replication streams Synapse now includes a flag indicating if an event is an outlier when diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index eefcc7829d..d8616f7dbd 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1,11 +1,11 @@ # Configuring Synapse -This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified -through the many configuration settings documented here — each config option is explained, +This is intended as a guide to the Synapse configuration. 
The behavior of a Synapse instance can be modified +through the many configuration settings documented here — each config option is explained, including what the default is, how to change the default and what sort of behaviour the setting governs. -Also included is an example configuration for each setting. If you don't want to spend a lot of time +Also included is an example configuration for each setting. If you don't want to spend a lot of time thinking about options, the config as generated sets sensible defaults for all values. Do note however that the -database defaults to SQLite, which is not recommended for production usage. You can read more on this subject +database defaults to SQLite, which is not recommended for production usage. You can read more on this subject [here](../../setup/installation.md#using-postgresql). ## Config Conventions @@ -26,17 +26,17 @@ messages from the database after 5 minutes, rather than 5 months. In addition, configuration options referring to size use the following suffixes: * `M` = MiB, or 1,048,576 bytes -* `K` = KiB, or 1024 bytes +* `K` = KiB, or 1024 bytes For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes -for a user avatar. +for a user avatar. -### YAML +### YAML The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules apply if you want your config file to be read properly. A few helpful things to know: -* `#` before any option in the config will comment out that setting and either a default (if available) will +* `#` before any option in the config will comment out that setting and either a default (if available) will be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and - applied, but in example #2 the setting will not be read and a default will be applied. + applied, but in example #2 the setting will not be read and a default will be applied. Example #1: ```yaml @@ -50,13 +50,13 @@ apply if you want your config file to be read properly. A few helpful things to will determine whether a given setting is read as part of another setting, or considered on its own. Thus, in example #1, the `enabled` setting is read as a sub-option of the `presence` setting, and will be properly applied. - + However, the lack of indentation before the `enabled` setting in example #2 means that when reading the config, Synapse will consider both `presence` and `enabled` as different settings. In this case, `presence` has no value, and thus a default applied, and `enabled` is an option that Synapse doesn't recognize and thus ignores. - - Example #1: + + Example #1: ```yaml presence: enabled: false @@ -66,11 +66,11 @@ apply if you want your config file to be read properly. A few helpful things to presence: enabled: false ``` - In this manual, all top-level settings (ones with no indentation) are identified - at the beginning of their section (i.e. "### `example_setting`") and - the sub-options, if any, are identified and listed in the body of the section. + In this manual, all top-level settings (ones with no indentation) are identified + at the beginning of their section (i.e. "### `example_setting`") and + the sub-options, if any, are identified and listed in the body of the section. In addition, each setting has an example of its usage, with the proper indentation - shown. + shown. ## Contents [Modules](#modules) @@ -126,7 +126,7 @@ documentation on how to configure or create custom modules for Synapse. 
--- ### `modules` -Use the `module` sub-option to add modules under this option to extend functionality. +Use the `module` sub-option to add modules under this option to extend functionality. The `module` setting then has a sub-option, `config`, which can be used to define some configuration for the `module`. @@ -166,11 +166,11 @@ The `server_name` cannot be changed later so it is important to configure this correctly before you start Synapse. It should be all lowercase and may contain an explicit port. -There is no default for this option. - +There is no default for this option. + Example configuration #1: ```yaml -server_name: matrix.org +server_name: matrix.org ``` Example configuration #2: ```yaml @@ -188,7 +188,7 @@ pid_file: DATADIR/homeserver.pid --- ### `web_client_location` -The absolute URL to the web client which `/` will redirect to. Defaults to none. +The absolute URL to the web client which `/` will redirect to. Defaults to none. Example configuration: ```yaml @@ -217,7 +217,7 @@ By default, other servers will try to reach our server on port 8448, which can be inconvenient in some environments. Provided `https:///` on port 443 is routed to Synapse, this -option configures Synapse to serve a file at `https:///.well-known/matrix/server`. +option configures Synapse to serve a file at `https:///.well-known/matrix/server`. This will tell other servers to send traffic to port 443 instead. This option currently defaults to false. @@ -235,7 +235,7 @@ serve_server_wellknown: true This option allows server runners to add arbitrary key-value pairs to the [client-facing `.well-known` response](https://spec.matrix.org/latest/client-server-api/#well-known-uri). Note that the `public_baseurl` config option must be provided for Synapse to serve a response to `/.well-known/matrix/client` at all. -If this option is provided, it parses the given yaml to json and +If this option is provided, it parses the given yaml to json and serves it on `/.well-known/matrix/client` endpoint alongside the standard properties. @@ -243,16 +243,16 @@ alongside the standard properties. Example configuration: ```yaml -extra_well_known_client_content : +extra_well_known_client_content : option1: value1 option2: value2 ``` --- ### `soft_file_limit` - + Set the soft limit on the number of file descriptors synapse can use. Zero is used to indicate synapse should set the soft limit to the hard limit. -Defaults to 0. +Defaults to 0. Example configuration: ```yaml @@ -262,8 +262,8 @@ soft_file_limit: 3 ### `presence` Presence tracking allows users to see the state (e.g online/offline) -of other local and remote users. Set the `enabled` sub-option to false to -disable presence tracking on this homeserver. Defaults to true. +of other local and remote users. Set the `enabled` sub-option to false to +disable presence tracking on this homeserver. Defaults to true. This option replaces the previous top-level 'use_presence' option. Example configuration: @@ -274,8 +274,8 @@ presence: --- ### `require_auth_for_profile_requests` -Whether to require authentication to retrieve profile data (avatars, display names) of other -users through the client API. Defaults to false. Note that profile data is also available +Whether to require authentication to retrieve profile data (avatars, display names) of other +users through the client API. Defaults to false. Note that profile data is also available via the federation API, unless `allow_profile_lookup_over_federation` is set to false. 
Example configuration: @@ -286,11 +286,11 @@ require_auth_for_profile_requests: true ### `limit_profile_requests_to_users_who_share_rooms` Use this option to require a user to share a room with another user in order -to retrieve their profile information. Only checked on Client-Server +to retrieve their profile information. Only checked on Client-Server requests. Profile requests from other servers should be checked by the requesting server. Defaults to false. -Example configuration: +Example configuration: ```yaml limit_profile_requests_to_users_who_share_rooms: true ``` @@ -336,7 +336,7 @@ The default room version for newly created rooms on this server. Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions) For example, for room version 1, `default_room_version` should be set -to "1". +to "1". Currently defaults to "9". @@ -348,7 +348,7 @@ default_room_version: "8" ### `gc_thresholds` The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined. -Defaults to none. +Defaults to none. Example configuration: ```yaml @@ -358,7 +358,7 @@ gc_thresholds: [700, 10, 10] ### `gc_min_interval` The minimum time in seconds between each GC for a generation, regardless of -the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` +the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive generation 0 GCs, etc. Defaults to `[1s, 10s, 30s]`. @@ -400,7 +400,7 @@ enable_search: false ``` --- ### `ip_range_blacklist` - + This option prevents outgoing requests from being sent to the specified blacklisted IP address CIDR ranges. If this option is not specified then it defaults to private IP address ranges (see the example below). @@ -463,13 +463,13 @@ configuration. Sub-options for each listener include: -* `port`: the TCP port to bind to. +* `port`: the TCP port to bind to. * `bind_addresses`: a list of local addresses to listen on. The default is 'all local interfaces'. * `type`: the type of listener. Normally `http`, but other valid options are: - + * `manhole`: (see the docs [here](../../manhole.md)), * `metrics`: (see the docs [here](../../metrics-howto.md)), @@ -585,7 +585,7 @@ forward extremities reaches a given threshold, Synapse will send an `org.matrix.dummy_event` event, which will reduce the forward extremities in the room. -This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent. +This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent. The default value is 10. Example configuration: @@ -612,7 +612,7 @@ Useful options for Synapse admins. ### `admin_contact` -How to reach the server admin, used in `ResourceLimitError`. Defaults to none. +How to reach the server admin, used in `ResourceLimitError`. Defaults to none. Example configuration: ```yaml @@ -622,7 +622,7 @@ admin_contact: 'mailto:admin@server.com' ### `hs_disabled` and `hs_disabled_message` Blocks users from connecting to the homeserver and provides a human-readable reason -why the connection was blocked. Defaults to false. +why the connection was blocked. Defaults to false. Example configuration: ```yaml @@ -632,20 +632,20 @@ hs_disabled_message: 'Reason for why the HS is blocked' --- ### `limit_usage_by_mau` -This option disables/enables monthly active user blocking. 
Used in cases where the admin or -server owner wants to limit to the number of monthly active users. When enabled and a limit is +This option disables/enables monthly active user blocking. Used in cases where the admin or +server owner wants to limit to the number of monthly active users. When enabled and a limit is reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. Defaults to false. If this is enabled, a value for `max_mau_value` must also be set. Example configuration: ```yaml -limit_usage_by_mau: true +limit_usage_by_mau: true ``` --- ### `max_mau_value` -This option sets the hard limit of monthly active users above which the server will start -blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. +This option sets the hard limit of monthly active users above which the server will start +blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. Example configuration: ```yaml @@ -658,7 +658,7 @@ The option `mau_trial_days` is a means to add a grace period for active users. I means that users must be active for the specified number of days before they can be considered active and guards against the case where lots of users sign up in a short space of time never to return after their initial -session. Defaults to 0. +session. Defaults to 0. Example configuration: ```yaml @@ -674,7 +674,7 @@ use the value of `mau_trial_days` instead. Example configuration: ```yaml -mau_appservice_trial_days: +mau_appservice_trial_days: my_appservice_id: 3 another_appservice_id: 6 ``` @@ -696,7 +696,7 @@ mau_limit_alerting: false If enabled, the metrics for the number of monthly active users will be populated, however no one will be limited based on these numbers. If `limit_usage_by_mau` -is true, this is implied to be true. Defaults to false. +is true, this is implied to be true. Defaults to false. Example configuration: ```yaml @@ -720,7 +720,7 @@ mau_limit_reserved_threepids: ### `server_context` This option is used by phonehome stats to group together related servers. -Defaults to none. +Defaults to none. Example configuration: ```yaml @@ -736,11 +736,11 @@ resource-constrained. Options for this setting include: * `enabled`: whether this check is enabled. Defaults to false. * `complexity`: the limit above which rooms cannot be joined. The default is 1.0. * `complexity_error`: override the error which is returned when the room is too complex with a - custom message. + custom message. * `admins_can_join`: allow server admins to join complex rooms. Default is false. Room complexity is an arbitrary measure based on factors such as the number of -users in the room. +users in the room. Example configuration: ```yaml @@ -775,7 +775,7 @@ allow_per_room_profiles: false ### `max_avatar_size` The largest permissible file size in bytes for a user avatar. Defaults to no restriction. -Use M for MB and K for KB. +Use M for MB and K for KB. Note that user avatar changes will not work if this is set without using Synapse's media repository. @@ -808,7 +808,7 @@ Example configuration: redaction_retention_period: 28d ``` --- -### `user_ips_max_age` +### `user_ips_max_age` How long to track users' last seen time and IPs in the database. @@ -823,7 +823,7 @@ user_ips_max_age: 14d Inhibits the `/requestToken` endpoints from returning an error that might leak information about whether an e-mail address is in use or not on this -homeserver. Defaults to false. +homeserver. Defaults to false. 
Note that for some endpoints the error situation is the e-mail already being used, and for others the error is entering the e-mail being unused. If this option is enabled, instead of returning an error, these endpoints will @@ -859,9 +859,9 @@ next_link_domain_whitelist: ["matrix.org"] ### `templates` and `custom_template_directory` These options define templates to use when generating email or HTML page contents. -The `custom_template_directory` determines which directory Synapse will try to +The `custom_template_directory` determines which directory Synapse will try to find template files in to use to generate email or HTML page contents. -If not set, or a file is not found within the template directory, a default +If not set, or a file is not found within the template directory, a default template from within the Synapse package will be used. See [here](../../templates.md) for more @@ -884,26 +884,26 @@ the `allowed_lifetime_min` and `allowed_lifetime_max` config options. If this feature is enabled, Synapse will regularly look for and purge events which are older than the room's maximum retention period. Synapse will also -filter events received over federation so that events that should have been -purged are ignored and not stored again. +filter events received over federation so that events that should have been +purged are ignored and not stored again. The message retention policies feature is disabled by default. This setting has the following sub-options: * `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the - 'm.room.retention' state event. This option is further specified by the - `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the - value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. + 'm.room.retention' state event. This option is further specified by the + `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the + value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. -* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If - set, and the state of a room contains a `m.room.retention` event in its state +* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If + set, and the state of a room contains a `m.room.retention` event in its state which contains a `min_lifetime` or a `max_lifetime` that's out of these bounds, Synapse will cap the room's policy to these limits when running purge jobs. * `purge_jobs` and the associated `shortest_max_lifetime` and `longest_max_lifetime` sub-options: Server admins can define the settings of the background jobs purging the events whose lifetime has expired under the `purge_jobs` section. - + If no configuration is provided for this option, a single job will be set up to delete expired events in every room daily. @@ -915,7 +915,7 @@ This setting has the following sub-options: range are optional, e.g. a job with no `shortest_max_lifetime` and a `longest_max_lifetime` of '3d' will handle every room with a retention policy whose `max_lifetime` is lower than or equal to three days. 
- + The rationale for this per-job configuration is that some rooms might have a retention policy with a low `max_lifetime`, where history needs to be purged of outdated messages on a more frequent basis than for the rest of the rooms @@ -944,7 +944,7 @@ retention: - longest_max_lifetime: 3d interval: 12h - shortest_max_lifetime: 3d - interval: 1d + interval: 1d ``` --- ## TLS ## @@ -956,11 +956,11 @@ Options related to TLS. This option specifies a PEM-encoded X509 certificate for TLS. This certificate, as of Synapse 1.0, will need to be a valid and verifiable -certificate, signed by a recognised Certificate Authority. Defaults to none. +certificate, signed by a recognised Certificate Authority. Defaults to none. Be sure to use a `.pem` file that includes the full certificate chain including any intermediate certificates (for instance, if using certbot, use -`fullchain.pem` as your certificate, not `cert.pem`). +`fullchain.pem` as your certificate, not `cert.pem`). Example configuration: ```yaml @@ -969,7 +969,7 @@ tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt" --- ### `tls_private_key_path` -PEM-encoded private key for TLS. Defaults to none. +PEM-encoded private key for TLS. Defaults to none. Example configuration: ```yaml @@ -1126,31 +1126,31 @@ Caching can be configured through the following sub-options: This can also be set by the `SYNAPSE_CACHE_FACTOR` environment variable. Setting by environment variable takes priority over setting through the config file. - + Defaults to 0.5, which will halve the size of all caches. * `per_cache_factors`: A dictionary of cache name to cache factor for that individual cache. Overrides the global cache factor for a given cache. - + These can also be set through environment variables comprised of `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital letters and underscores. Setting by environment variable takes priority over setting through the config file. Ex. `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0` - + Some caches have '*' and other characters that are not alphanumeric or underscores. These caches can be named with or without the special characters stripped. For example, to specify the cache factor for `*stateGroupCache*` via an environment variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. - + * `expire_caches`: Controls whether cache entries are evicted after a specified time period. Defaults to true. Set to false to disable this feature. Note that never expiring - caches may result in excessive memory usage. + caches may result in excessive memory usage. * `cache_entry_ttl`: If `expire_caches` is enabled, this flag controls how long an entry can be in a cache without having been accessed before being evicted. - Defaults to 30m. + Defaults to 30m. * `sync_response_cache_duration`: Controls how long the results of a /sync request are cached for after a successful response is returned. A higher duration can help clients @@ -1161,8 +1161,8 @@ Caching can be configured through the following sub-options: *Changed in Synapse 1.62.0*: The default was changed from 0 to 2m. * `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and - `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory - usage and cache entry availability. 
You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) + `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory + usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) to utilize this option, and all three of the options must be specified for this feature to work. This option defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided. @@ -1175,7 +1175,7 @@ Caching can be configured through the following sub-options: for this option. * `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches - from being emptied while Synapse is evicting due to memory. There is no default value for this option. + from being emptied while Synapse is evicting due to memory. There is no default value for this option. Example configuration: ```yaml @@ -1199,7 +1199,7 @@ The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) kill -HUP [PID_OF_SYNAPSE_PROCESS] ``` -If you are running multiple workers, you must individually update the worker +If you are running multiple workers, you must individually update the worker config file and send this signal to each worker process. If you're using the [example systemd service](https://github.com/matrix-org/synapse/blob/develop/contrib/systemd/matrix-synapse.service) @@ -1219,7 +1219,7 @@ its data. Associated sub-options: * `name`: this option specifies the database engine to use: either `sqlite3` (for SQLite) - or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. + or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. * `txn_limit` gives the maximum number of transactions to run per connection before reconnecting. Defaults to 0, which means no limit. @@ -1355,7 +1355,7 @@ databases: ``` --- ## Logging ## -Config options related to logging. +Config options related to logging. --- ### `log_config` @@ -1368,7 +1368,7 @@ log_config: "CONFDIR/SERVERNAME.log.config" ``` --- ## Ratelimiting ## -Options related to ratelimiting in Synapse. +Options related to ratelimiting in Synapse. Each ratelimiting configuration is made of two parameters: - `per_second`: number of requests a client can send per second. @@ -1378,7 +1378,7 @@ Each ratelimiting configuration is made of two parameters: Ratelimiting settings for client messaging. - + This is a ratelimiting option for messages that ratelimits sending based on the account the client is using. It defaults to: `per_second: 0.2`, `burst_count: 10`. @@ -1392,7 +1392,7 @@ rc_message: ### `rc_registration` This option ratelimits registration requests based on the client's IP address. -It defaults to `per_second: 0.17`, `burst_count: 3`. +It defaults to `per_second: 0.17`, `burst_count: 3`. Example configuration: ```yaml @@ -1403,7 +1403,7 @@ rc_registration: --- ### `rc_registration_token_validity` -This option checks the validity of registration tokens that ratelimits requests based on +This option checks the validity of registration tokens that ratelimits requests based on the client's IP address. 
Defaults to `per_second: 0.1`, `burst_count: 5`. @@ -1412,18 +1412,18 @@ Example configuration: rc_registration_token_validity: per_second: 0.3 burst_count: 6 -``` +``` --- ### `rc_login` This option specifies several limits for login: * `address` ratelimits login requests based on the client's IP address. Defaults to `per_second: 0.17`, `burst_count: 3`. - + * `account` ratelimits login requests based on the account the client is attempting to log into. Defaults to `per_second: 0.17`, `burst_count: 3`. - + * `failted_attempts` ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`. @@ -1444,9 +1444,9 @@ rc_login: --- ### `rc_admin_redaction` -This option sets ratelimiting redactions by room admins. If this is not explicitly +This option sets ratelimiting redactions by room admins. If this is not explicitly set then it uses the same ratelimiting as per `rc_message`. This is useful -to allow room admins to deal with abuse quickly. +to allow room admins to deal with abuse quickly. Example configuration: ```yaml @@ -1459,12 +1459,12 @@ rc_admin_redaction: This option allows for ratelimiting number of rooms a user can join. This setting has the following sub-options: -* `local`: ratelimits when users are joining rooms the server is already in. +* `local`: ratelimits when users are joining rooms the server is already in. Defaults to `per_second: 0.1`, `burst_count: 10`. * `remote`: ratelimits when users are trying to join rooms not on the server (which can be more computationally expensive than restricting locally). Defaults to - `per_second: 0.01`, `burst_count: 10` + `per_second: 0.01`, `burst_count: 10` Example configuration: ```yaml @@ -1510,9 +1510,9 @@ rc_3pid_validation: --- ### `rc_invites` -This option sets ratelimiting how often invites can be sent in a room or to a +This option sets ratelimiting how often invites can be sent in a room or to a specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and -`per_user` defaults to `per_second: 0.003`, `burst_count: 5`. +`per_user` defaults to `per_second: 0.003`, `burst_count: 5`. Client requests that invite user(s) when [creating a room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom) @@ -1562,7 +1562,7 @@ rc_third_party_invite: --- ### `rc_federation` -Defines limits on federation requests. +Defines limits on federation requests. The `rc_federation` configuration has the following sub-options: * `window_size`: window size in milliseconds. Defaults to 1000. @@ -1591,7 +1591,7 @@ Sets outgoing federation transaction frequency for sending read-receipts, per-room. If we end up trying to send out more read-receipts, they will get buffered up -into fewer transactions. Defaults to 50. +into fewer transactions. Defaults to 50. Example configuration: ```yaml @@ -1602,9 +1602,9 @@ federation_rr_transactions_per_room_per_second: 40 Config options related to Synapse's media store. --- -### `enable_media_repo` +### `enable_media_repo` -Enable the media store service in the Synapse master. Defaults to true. +Enable the media store service in the Synapse master. Defaults to true. Set to false if you are using a separate media store worker. Example configuration: @@ -1629,7 +1629,7 @@ locations. Defaults to none. 
Associated sub-options are: * `store_local`: whether to store newly uploaded local files * `store_remote`: whether to store newly downloaded local files * `store_synchronous`: whether to wait for successful storage for local uploads -* `config`: sets a path to the resource through the `directory` option +* `config`: sets a path to the resource through the `directory` option Example configuration: ```yaml @@ -1648,7 +1648,7 @@ The largest allowed upload size in bytes. If you are using a reverse proxy you may also need to set this value in your reverse proxy's config. Defaults to 50M. Notably Nginx has a small max body size by default. -See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. +See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. Example configuration: ```yaml @@ -1670,14 +1670,14 @@ Whether to generate new thumbnails on the fly to precisely match the resolution requested by the client. If true then whenever a new resolution is requested by the client the server will generate a new thumbnail. If false the server will pick a thumbnail -from a precalculated list. Defaults to false. +from a precalculated list. Defaults to false. Example configuration: ```yaml dynamic_thumbnails: true ``` --- -### `thumbnail_sizes` +### `thumbnail_sizes` List of thumbnails to precalculate when an image is uploaded. Associated sub-options are: * `width` @@ -1795,7 +1795,7 @@ This option sets a list of IP address CIDR ranges that the URL preview spider is to access even if they are specified in `url_preview_ip_range_blacklist`. This is useful for specifying exceptions to wide-ranging blacklisted target IP ranges - e.g. for enabling URL previews for a specific private -website only visible in your network. Defaults to none. +website only visible in your network. Defaults to none. Example configuration: ```yaml @@ -1813,7 +1813,7 @@ This is more useful if you know there is an entire shape of URL that you know that will never want synapse to try to spider. Each list entry is a dictionary of url component attributes as returned -by urlparse.urlsplit as applied to the absolute form of the URL. See +by urlparse.urlsplit as applied to the absolute form of the URL. See [here](https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit) for more information. Some examples are: @@ -1888,8 +1888,8 @@ Example configuration: oEmbed allows for easier embedding content from a website. It can be used for generating URLs previews of services which support it. A default list of oEmbed providers is included with Synapse. Set `disable_default_providers` to true to disable using -these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each -should be in the form of providers.json). By default this list is empty. +these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each +should be in the form of providers.json). By default this list is empty. Example configuration: ```yaml @@ -1906,7 +1906,7 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. --- ### `recaptcha_public_key` -This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is enabled. 
Example configuration: @@ -1914,9 +1914,9 @@ Example configuration: recaptcha_public_key: "YOUR_PUBLIC_KEY" ``` --- -### `recaptcha_private_key` +### `recaptcha_private_key` -This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is enabled. Example configuration: @@ -1927,7 +1927,7 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY" ### `enable_registration_captcha` Set to true to enable ReCaptcha checks when registering, preventing signup -unless a captcha is answered. Requires a valid ReCaptcha public/private key. +unless a captcha is answered. Requires a valid ReCaptcha public/private key. Defaults to false. Example configuration: @@ -2005,7 +2005,7 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat ### `enable_registration` Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration, -you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration +you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration without any verification, you must also set `enable_registration_without_verification` to true. Example configuration: @@ -2029,7 +2029,7 @@ Time that a user's session remains valid for, after they log in. Note that this is not currently compatible with guest logins. -Note also that this is calculated at login time: changes are not applied retrospectively to users who have already +Note also that this is calculated at login time: changes are not applied retrospectively to users who have already logged in. By default, this is infinite. @@ -2047,7 +2047,7 @@ For more information about refresh tokens, please see the [manual](user_authenti Note that this only applies to clients which advertise support for refresh tokens. -Note also that this is calculated at login time and refresh time: changes are not applied to +Note also that this is calculated at login time and refresh time: changes are not applied to existing sessions until they are refreshed. By default, this is 5 minutes. @@ -2145,7 +2145,7 @@ Require users to submit a token during registration. Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). Note that `enable_registration` must be set to true. Disabling this option will not delete any tokens previously generated. -Defaults to false. Set to true to enable. +Defaults to false. Set to true to enable. Example configuration: ```yaml @@ -2215,7 +2215,7 @@ their account. by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest).) -*Updated in Synapse 1.64.0*: No longer accepts an `email` option. +*Updated in Synapse 1.64.0*: The `email` option is deprecated. Example configuration: ```yaml @@ -2270,7 +2270,7 @@ By default, any room aliases included in this list will be created as a publicly joinable room when the first user registers for the homeserver. If the room already exists, make certain it is a publicly joinable room, i.e. the join rule of the room must be set to 'public'. You can find more options -relating to auto-joining rooms below. +relating to auto-joining rooms below. Example configuration: ```yaml @@ -2324,9 +2324,9 @@ effect if `autocreate_auto_join_rooms` is true. 
Possible values for this option are: * "public_chat": the room is joinable by anyone, including federated servers if `autocreate_auto_join_rooms_federated` is true (the default). -* "private_chat": an invitation is required to join these rooms. +* "private_chat": an invitation is required to join these rooms. * "trusted_private_chat": an invitation is required to join this room and the invitee is - assigned a power level of 100 upon joining the room. + assigned a power level of 100 upon joining the room. If a value of "private_chat" or "trusted_private_chat" is used then `auto_join_mxid_localpart` must also be configured. @@ -2363,7 +2363,7 @@ auto_join_mxid_localpart: system ``` --- ### `auto_join_rooms_for_guests` - + When `auto_join_rooms` is specified, setting this flag to false prevents guest accounts from being automatically joined to the rooms. @@ -2375,7 +2375,7 @@ auto_join_rooms_for_guests: false ``` --- ### `inhibit_user_in_use_error` - + Whether to inhibit errors raised when registering a new account if the user ID already exists. If turned on, requests to `/register/available` will always show a user ID as available, and Synapse won't raise an error when starting @@ -2395,7 +2395,7 @@ Config options related to metrics. --- ### `enable_metrics` -Set to true to enable collection and rendering of performance metrics. +Set to true to enable collection and rendering of performance metrics. Defaults to false. Example configuration: @@ -2406,11 +2406,11 @@ enable_metrics: true ### `sentry` Use this option to enable sentry integration. Provide the DSN assigned to you by sentry -with the `dsn` setting. +with the `dsn` setting. NOTE: While attempts are made to ensure that the logs don't contain any sensitive information, this cannot be guaranteed. By enabling -this option the sentry server may therefore receive sensitive +this option the sentry server may therefore receive sensitive information, and it in turn may then disseminate sensitive information through insecure notification channels if so configured. @@ -2424,7 +2424,7 @@ sentry: Flags to enable Prometheus metrics which are not suitable to be enabled by default, either for performance reasons or limited use. -Currently the only option is `known_servers`, which publishes +Currently the only option is `known_servers`, which publishes `synapse_federation_known_servers`, a gauge of the number of servers this homeserver knows about, including itself. May cause performance problems on large homeservers. @@ -2468,7 +2468,7 @@ Config settings related to the client/server API ### `room_prejoin_state:` Controls for the state that is shared with users who receive an invite -to a room. By default, the following state event types are shared with users who +to a room. By default, the following state event types are shared with users who receive invites to the room: - m.room.join_rules - m.room.canonical_alias @@ -2479,7 +2479,7 @@ receive invites to the room: - m.room.topic To change the default behavior, use the following sub-options: -* `disable_default_event_types`: set to true to disable the above defaults. If this +* `disable_default_event_types`: set to true to disable the above defaults. If this is enabled, only the event types listed in `additional_event_types` are shared. Defaults to false. 
* `additional_event_types`: Additional state event types to share with users when they are invited @@ -2569,7 +2569,7 @@ Example configuration: ```yaml signing_key_path: "CONFDIR/SERVERNAME.signing.key" ``` ---- +--- ### `old_signing_keys` The keys that the server used to sign messages with but won't use @@ -2621,7 +2621,7 @@ Options for each entry in the list include: If specified, we will check that the response is signed by at least one of the given keys. * `accept_keys_insecurely`: a boolean. Normally, if `verify_keys` is unset, - and `federation_verify_certificates` is not `true`, synapse will refuse + and `federation_verify_certificates` is not `true`, synapse will refuse to start, because this would allow anyone who can spoof DNS responses to masquerade as the trusted key server. If you know what you are doing and are sure that your network environment provides a secure connection @@ -2699,15 +2699,15 @@ This setting has the following sub-options: * `service`: By default, the user has to go to our login page first. If you'd like to allow IdP-initiated login, set `allow_unsolicited` to true under `sp` in the `service` section. -* `config_path`: specify a separate pysaml2 configuration file thusly: +* `config_path`: specify a separate pysaml2 configuration file thusly: `config_path: "CONFDIR/sp_conf.py"` * `saml_session_lifetime`: The lifetime of a SAML session. This defines how long a user has to complete the authentication process, if `allow_unsolicited` is unset. The default is 15 minutes. -* `user_mapping_provider`: Using this option, an external module can be provided as a - custom solution to mapping attributes returned from a saml provider onto a matrix user. The +* `user_mapping_provider`: Using this option, an external module can be provided as a + custom solution to mapping attributes returned from a saml provider onto a matrix user. The `user_mapping_provider` has the following attributes: - * `module`: The custom module's class. - * `config`: Custom configuration values for the module. Use the values provided in the + * `module`: The custom module's class. + * `config`: Custom configuration values for the module. Use the values provided in the example if you are using the built-in user_mapping_provider, or provide your own config values for a custom class if you are using one. This section will be passed as a Python dictionary to the module's `parse_config` method. The built-in provider takes the following two @@ -2724,7 +2724,7 @@ This setting has the following sub-options: MXID was always calculated dynamically rather than stored in a table. For backwards- compatibility, we will look for `user_ids` matching such a pattern before creating a new account. This setting controls the SAML attribute which will be used for this backwards-compatibility lookup. Typically it should be 'uid', but if the attribute maps are changed, it may be necessary to change it. - The default is 'uid'. + The default is 'uid'. * `attribute_requirements`: It is possible to configure Synapse to only allow logins if SAML attributes match particular values. The requirements can be listed under `attribute_requirements` as shown in the example. All of the listed attributes must @@ -2732,7 +2732,7 @@ This setting has the following sub-options: * `idp_entityid`: If the metadata XML contains multiple IdP entities then the `idp_entityid` option must be set to the entity to redirect users to. Most deployments only have a single IdP entity and so should omit this option. 
- + Once SAML support is enabled, a metadata file will be exposed at `https://:/_synapse/client/saml2/metadata.xml`, which you may be able to @@ -2793,16 +2793,16 @@ saml2_config: sur_name: "the Sysadmin" email_address": ["admin@example.com"] contact_type": technical - + saml_session_lifetime: 5m - + user_mapping_provider: - # Below options are intended for the built-in provider, they should be - # changed if using a custom module. + # Below options are intended for the built-in provider, they should be + # changed if using a custom module. config: mxid_source_attribute: displayName mxid_mapping: dotreplace - + grandfathered_mxid_source_attribute: upn attribute_requirements: @@ -2930,7 +2930,7 @@ Options for each entry include: * `localpart_template`: Jinja2 template for the localpart of the MXID. If this is not set, the user will be prompted to choose their - own username (see the documentation for the `sso_auth_account_details.html` + own username (see the documentation for the `sso_auth_account_details.html` template). This template can use the `localpart_from_email` filter. * `confirm_localpart`: Whether to prompt the user to validate (or @@ -2943,7 +2943,7 @@ Options for each entry include: * `email_template`: Jinja2 template for the email address of the user. If unset, no email address will be added to the account. - + * `extra_attributes`: a map of Jinja2 templates for extra attributes to send back to the client during login. Note that these are non-standard and clients will ignore them without modifications. @@ -2953,7 +2953,7 @@ Options for each entry include: in the ID Token. -It is possible to configure Synapse to only allow logins if certain attributes +It is possible to configure Synapse to only allow logins if certain attributes match particular values in the OIDC userinfo. The requirements can be listed under `attribute_requirements` as shown here: ```yaml @@ -2968,7 +2968,7 @@ userinfo by expanding the `scopes` section of the OIDC config to retrieve additional information from the OIDC provider. If the OIDC claim is a list, then the attribute must match any value in the list. -Otherwise, it must exactly match the value of the claim. Using the example +Otherwise, it must exactly match the value of the claim. Using the example above, the `family_name` claim MUST be "Stephensson", but the `groups` claim MUST contain "admin". @@ -3033,7 +3033,7 @@ cas_config: Additional settings to use with single-sign on systems such as OpenID Connect, SAML2 and CAS. -Server admins can configure custom templates for pages related to SSO. See +Server admins can configure custom templates for pages related to SSO. See [here](../../templates.md) for more information. Options include: @@ -3049,7 +3049,7 @@ Options include: required login flows) is whitelisted in addition to any URLs in this list. By default, this list contains only the login fallback page. * `update_profile_information`: Use this setting to keep a user's profile fields in sync with information from - the identity provider. Currently only syncing the displayname is supported. Fields + the identity provider. Currently only syncing the displayname is supported. Fields are checked on every SSO login, and are updated if necessary. Note that enabling this option will override user profile information, regardless of whether users have opted-out of syncing that @@ -3093,7 +3093,7 @@ Additional sub-options for this setting include: Required if `enabled` is set to true. 
* `subject_claim`: Name of the claim containing a unique identifier for the user. Optional, defaults to `sub`. -* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the +* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the "iss" claim will be required and validated for all JSON web tokens. * `audiences`: A list of audiences to validate the "aud" claim against. Optional. If provided the "aud" claim will be required and validated for all JSON web tokens. @@ -3103,7 +3103,7 @@ Additional sub-options for this setting include: Example configuration: ```yaml jwt_config: - enabled: true + enabled: true secret: "provided-by-your-issuer" algorithm: "provided-by-your-issuer" subject_claim: "name_of_claim" @@ -3114,7 +3114,7 @@ jwt_config: --- ### `password_config` -Use this setting to enable password-based logins. +Use this setting to enable password-based logins. This setting has the following sub-options: * `enabled`: Defaults to true. @@ -3123,10 +3123,10 @@ This setting has the following sub-options: to log in and reauthenticate, whilst preventing new users from setting passwords. * `localdb_enabled`: Set to false to disable authentication against the local password database. This is ignored if `enabled` is false, and is only useful - if you have other `password_providers`. Defaults to true. + if you have other `password_providers`. Defaults to true. * `pepper`: Set the value here to a secret random string for extra security. DO NOT CHANGE THIS AFTER INITIAL SETUP! -* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. +* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows: * `enabled`: Defaults to false. Set to true to enable. * `minimum_length`: Minimum accepted length for a password. Defaults to 0. @@ -3138,7 +3138,7 @@ This setting has the following sub-options: Defaults to false. * `require_uppercase`: Whether a password must contain at least one uppercase letter. Defaults to false. - + Example configuration: ```yaml @@ -3160,7 +3160,7 @@ password_config: The amount of time to allow a user-interactive authentication session to be active. -This defaults to 0, meaning the user is queried for their credentials +This defaults to 0, meaning the user is queried for their credentials before every action, but this can be overridden to allow a single validation to be re-used. This weakens the protections afforded by the user-interactive authentication process, by allowing for multiple @@ -3188,7 +3188,7 @@ Server admins can configure custom templates for email content. See This setting has the following sub-options: * `smtp_host`: The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. * `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 465 if `force_tls` is true, else 25. - + _Changed in Synapse 1.64.0:_ the default port is now aware of `force_tls`. * `smtp_user` and `smtp_pass`: Username/password for authentication to the SMTP server. By default, no authentication is attempted. @@ -3196,7 +3196,7 @@ This setting has the following sub-options: to TLS via STARTTLS. If this option is set to true, TLS is used from the start (Implicit TLS), and the option `require_transport_security` is ignored. It is recommended to enable this if supported by your mail server. 
- + _New in Synapse 1.64.0._ * `require_transport_security`: Set to true to require TLS transport security for SMTP. By default, Synapse will connect over plain text, and will then switch to @@ -3231,8 +3231,8 @@ This setting has the following sub-options: message(s) have been sent to, e.g. "My super room". In addition, emails related to account administration will can use the '%(server_name)s' placeholder, which will be replaced by the value of the `server_name` setting in your Synapse configuration. - - Here is a list of subjects for notification emails that can be set: + + Here is a list of subjects for notification emails that can be set: * `message_from_person_in_room`: Subject to use to notify about one message from one or more user(s) in a room which has a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..." * `message_from_person`: Subject to use to notify about one message from one or more user(s) in a @@ -3241,13 +3241,13 @@ This setting has the following sub-options: a room which doesn't have a name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s..." * `messages_in_room`: Subject to use to notify about multiple messages in a room which has a name. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room..." - * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms. + * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room and others..." * `messages_from_person_and_others`: Subject to use to notify about multiple messages from multiple persons in multiple rooms. This is similar to the setting above except it's used when - the room in which the notification was triggered has no name. Defaults to + the room in which the notification was triggered has no name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s and others..." - * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name. + * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name. Defaults to "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..." * `invite_from_person`: Subject to use to notify about an invite to a room which doesn't have a name. Defaults to "[%(app)s] %(person)s has invited you to chat on %(app)s..." @@ -3292,7 +3292,7 @@ Configuration settings related to push notifications --- ### `push` -This setting defines options for push notifications. +This setting defines options for push notifications. This option has a number of sub-options. They are as follows: * `include_content`: Clients requesting push notifications can either have the body of @@ -3307,7 +3307,7 @@ This option has a number of sub-options. They are as follows: notification saying only that a message arrived and who it came from. Defaults to true. Set to false to only include the event ID and room ID in push notification payloads. * `group_unread_count_by_room: false`: When a push notification is received, an unread count is also sent. - This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the + This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the user has unread messages in. 
Defaults to true, meaning push clients will see the number of rooms with unread messages in them. Set to false to instead send the number of unread messages. @@ -3347,7 +3347,7 @@ encryption_enabled_by_default_for_room_type: invite --- ### `user_directory` -This setting defines options related to the user directory. +This setting defines options related to the user directory. This option has the following sub-options: * `enabled`: Defines whether users can search the user directory. If false then @@ -3365,7 +3365,7 @@ This option has the following sub-options: Set to true to return search results containing all known users, even if that user does not share a room with the requester. * `prefer_local_users`: Defines whether to prefer local users in search query results. - If set to true, local users are more likely to appear above remote users when searching the + If set to true, local users are more likely to appear above remote users when searching the user directory. Defaults to false. Example configuration: @@ -3430,15 +3430,15 @@ user_consent: ### `stats` Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md) -for more. +for more. * `enabled`: Set to false to disable room and user statistics. Note that doing so may cause certain features (such as the room directory) not to work - correctly. Defaults to true. + correctly. Defaults to true. Example configuration: ```yaml -stats: +stats: enabled: false ``` --- @@ -3470,7 +3470,7 @@ server_notices: Set to false to disable searching the public room list. When disabled blocks searching local and remote room lists for local and remote -users by always returning an empty list for all queries. Defaults to true. +users by always returning an empty list for all queries. Defaults to true. Example configuration: ```yaml @@ -3496,7 +3496,7 @@ Options for the rules include: * `user_id`: Matches against the creator of the alias. Defaults to "*". * `alias`: Matches against the alias being created. Defaults to "*". * `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*" -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. +* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. Example configuration: ```yaml @@ -3526,7 +3526,7 @@ Options for the rules include: * `user_id`: Matches against the creator of the alias. Defaults to "*". * `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*". * `room_id`: Matches against the room ID being published. Defaults to "*". -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. +* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. Example configuration: ```yaml @@ -3578,14 +3578,14 @@ synapse or any other services which support opentracing Sub-options include: * `enabled`: whether tracing is enabled. Set to true to enable. Disabled by default. * `homeserver_whitelist`: The list of homeservers we wish to send and receive span contexts and span baggage. - See [here](../../opentracing.md) for more. + See [here](../../opentracing.md) for more. This is a list of regexes which are matched against the `server_name` of the homeserver. By default, it is empty, so no servers are matched. 
* `force_tracing_for_users`: # A list of the matrix IDs of users whose requests will always be traced, even if the tracing system would otherwise drop the traces due to probabilistic sampling. By default, the list is empty. * `jaeger_config`: Jaeger can be configured to sample traces at different rates. - All configuration options provided by Jaeger can be set here. Jaeger's configuration is + All configuration options provided by Jaeger can be set here. Jaeger's configuration is mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/). Example configuration: @@ -3613,7 +3613,7 @@ Configuration options related to workers. ### `send_federation` Controls sending of outbound federation transactions on the main process. -Set to false if using a federation sender worker. Defaults to true. +Set to false if using a federation sender worker. Defaults to true. Example configuration: ```yaml @@ -3623,12 +3623,12 @@ send_federation: false ### `federation_sender_instances` It is possible to run multiple federation sender workers, in which case the -work is balanced across them. Use this setting to list the senders. +work is balanced across them. Use this setting to list the senders. This configuration setting must be shared between all federation sender workers, and if changed all federation sender workers must be stopped at the same time and then started, to ensure that all instances are running with the same config (otherwise -events may be dropped). +events may be dropped). Example configuration: ```yaml @@ -3639,7 +3639,7 @@ federation_sender_instances: ### `instance_map` When using workers this should be a map from worker name to the -HTTP replication listener of the worker, if configured. +HTTP replication listener of the worker, if configured. Example configuration: ```yaml @@ -3688,7 +3688,7 @@ worker_replication_secret: "secret_secret" Configuration for Redis when using workers. This *must* be enabled when using workers (unless using old style direct TCP configuration). This setting has the following sub-options: -* `enabled`: whether to use Redis support. Defaults to false. +* `enabled`: whether to use Redis support. Defaults to false. * `host` and `port`: Optional host and port to use to connect to redis. Defaults to localhost and 6379 * `password`: Optional password if configured on the Redis instance. @@ -3702,7 +3702,7 @@ redis: password: ``` ## Background Updates ## -Configuration settings related to background updates. +Configuration settings related to background updates. --- ### `background_updates` @@ -3711,7 +3711,7 @@ Background updates are database updates that are run in the background in batche The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to sleep can all be configured. This is helpful to speed up or slow down the updates. This setting has the following sub-options: -* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100. +* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100. Set a different time to change the default. * `sleep_enabled`: Whether to sleep between updates. Defaults to true. Set to false to change the default. * `sleep_duration_ms`: If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. 
@@ -3721,7 +3721,7 @@ This setting has the following sub-options: * `default_batch_size`: The batch size to use for the first iteration of a new background update. The default is 100. Set a size to change the default. -Example configuration: +Example configuration: ```yaml background_updates: background_update_duration_ms: 500 diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6bafa7d3f3..745e704141 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -44,6 +44,7 @@ from synapse.app._base import ( register_start, ) from synapse.config._base import ConfigError, format_config_error +from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig from synapse.federation.transport.server import TransportLayerServer @@ -201,7 +202,7 @@ class SynapseHomeServer(HomeServer): } ) - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: from synapse.rest.synapse.client.password_reset import ( PasswordResetSubmitTokenResource, ) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 73b469f414..7765c5b454 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -18,6 +18,7 @@ import email.utils import logging import os +from enum import Enum from typing import Any import attr @@ -135,22 +136,40 @@ class EmailConfig(Config): self.email_enable_notifs = email_config.get("enable_notifs", False) + self.threepid_behaviour_email = ( + # Have Synapse handle the email sending if account_threepid_delegates.email + # is not defined + # msisdn is currently always remote while Synapse does not support any method of + # sending SMS messages + ThreepidBehaviour.REMOTE + if self.root.registration.account_threepid_delegate_email + else ThreepidBehaviour.LOCAL + ) + if config.get("trust_identity_server_for_password_resets"): raise ConfigError( - 'The config option "trust_identity_server_for_password_resets" ' - "is no longer supported. Please remove it from the config file." + 'The config option "trust_identity_server_for_password_resets" has been removed.' + "Please consult the configuration manual at docs/usage/configuration/config_documentation.md for " + "details and update your config file." ) - # If we have email config settings, assume that we can verify ownership of - # email addresses. 
- self.can_verify_email = email_config != {} + self.local_threepid_handling_disabled_due_to_email_config = False + if ( + self.threepid_behaviour_email == ThreepidBehaviour.LOCAL + and email_config == {} + ): + # We cannot warn the user this has happened here + # Instead do so when a user attempts to reset their password + self.local_threepid_handling_disabled_due_to_email_config = True + + self.threepid_behaviour_email = ThreepidBehaviour.OFF # Get lifetime of a validation token in milliseconds self.email_validation_token_lifetime = self.parse_duration( email_config.get("validation_token_lifetime", "1h") ) - if self.can_verify_email: + if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL: missing = [] if not self.email_notif_from: missing.append("email.notif_from") @@ -341,3 +360,18 @@ class EmailConfig(Config): "Config option email.invite_client_location must be a http or https URL", path=("email", "invite_client_location"), ) + + +class ThreepidBehaviour(Enum): + """ + Enum to define the behaviour of Synapse with regards to when it contacts an identity + server for 3pid registration and password resets + + REMOTE = use an external server to send tokens + LOCAL = send tokens ourselves + OFF = disable registration via 3pid and password resets + """ + + REMOTE = "remote" + LOCAL = "local" + OFF = "off" diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 685a0423c5..01fb0331bc 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse +import logging from typing import Any, Optional from synapse.api.constants import RoomCreationPreset @@ -20,11 +21,15 @@ from synapse.config._base import Config, ConfigError from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool -NO_EMAIL_DELEGATE_ERROR = """\ -Delegation of email verification to an identity server is no longer supported. To +logger = logging.getLogger(__name__) + +LEGACY_EMAIL_DELEGATE_WARNING = """\ +Delegation of email verification to an identity server is now deprecated. To continue to allow users to add email addresses to their accounts, and use them for password resets, configure Synapse with an SMTP server via the `email` setting, and remove `account_threepid_delegates.email`. + +This will be an error in a future version. 
""" @@ -59,8 +64,9 @@ class RegistrationConfig(Config): account_threepid_delegates = config.get("account_threepid_delegates") or {} if "email" in account_threepid_delegates: - raise ConfigError(NO_EMAIL_DELEGATE_ERROR) - # self.account_threepid_delegate_email = account_threepid_delegates.get("email") + logger.warning(LEGACY_EMAIL_DELEGATE_WARNING) + + self.account_threepid_delegate_email = account_threepid_delegates.get("email") self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9571d461c8..e5afe84df9 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.config.emailconfig import ThreepidBehaviour from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest @@ -415,6 +416,48 @@ class IdentityHandler: return session_id + async def request_email_token( + self, + id_server: str, + email: str, + client_secret: str, + send_attempt: int, + next_link: Optional[str] = None, + ) -> JsonDict: + """ + Request an external server send an email on our behalf for the purposes of threepid + validation. + + Args: + id_server: The identity server to proxy to + email: The email to send the message to + client_secret: The unique client_secret sends by the user + send_attempt: Which attempt this is + next_link: A link to redirect the user to once they submit the token + + Returns: + The json response body from the server + """ + params = { + "email": email, + "client_secret": client_secret, + "send_attempt": send_attempt, + } + if next_link: + params["next_link"] = next_link + + try: + data = await self.http_client.post_json_get_json( + id_server + "/_matrix/identity/api/v1/validate/email/requestToken", + params, + ) + return data + except HttpResponseException as e: + logger.info("Proxied requestToken failed: %r", e) + raise e.to_synapse_error() + except RequestTimedOutError: + raise SynapseError(500, "Timed out contacting identity server") + async def requestMsisdnToken( self, id_server: str, @@ -488,7 +531,18 @@ class IdentityHandler: validation_session = None # Try to validate as email - if self.hs.config.email.can_verify_email: + if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + # Remote emails will only be used if a valid identity server is provided. 
+ assert ( + self.hs.config.registration.account_threepid_delegate_email is not None + ) + + # Ask our delegated email identity server + validation_session = await self.threepid_from_creds( + self.hs.config.registration.account_threepid_delegate_email, + threepid_creds, + ) + elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: # Get a validated session matching these details validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index a744d68c64..05cebb5d4d 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -19,6 +19,7 @@ from twisted.web.client import PartialDownloadError from synapse.api.constants import LoginType from synapse.api.errors import Codes, LoginError, SynapseError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder if TYPE_CHECKING: @@ -152,7 +153,7 @@ class _BaseThreepidAuthChecker: logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,)) - # msisdns are currently always verified via the IS + # msisdns are currently always ThreepidBehaviour.REMOTE if medium == "msisdn": if not self.hs.config.registration.account_threepid_delegate_msisdn: raise SynapseError( @@ -163,7 +164,18 @@ class _BaseThreepidAuthChecker: threepid_creds, ) elif medium == "email": - if self.hs.config.email.can_verify_email: + if ( + self.hs.config.email.threepid_behaviour_email + == ThreepidBehaviour.REMOTE + ): + assert self.hs.config.registration.account_threepid_delegate_email + threepid = await identity_handler.threepid_from_creds( + self.hs.config.registration.account_threepid_delegate_email, + threepid_creds, + ) + elif ( + self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL + ): threepid = None row = await self.store.get_threepid_validation_session( medium, @@ -215,7 +227,10 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec _BaseThreepidAuthChecker.__init__(self, hs) def is_enabled(self) -> bool: - return self.hs.config.email.can_verify_email + return self.hs.config.email.threepid_behaviour_email in ( + ThreepidBehaviour.REMOTE, + ThreepidBehaviour.LOCAL, + ) async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 0cc87a4001..50edc6b7d3 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -28,6 +28,7 @@ from synapse.api.errors import ( SynapseError, ThreepidValidationError, ) +from synapse.config.emailconfig import ThreepidBehaviour from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.http.server import HttpServer, finish_request, respond_with_html from synapse.http.servlet import ( @@ -63,7 +64,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): self.config = hs.config self.identity_handler = hs.get_identity_handler() - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -72,10 +73,11 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.config.email.can_verify_email: - logger.warning( - "User password resets have 
been disabled due to lack of email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "User password resets have been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based password resets have been disabled on this server" ) @@ -127,21 +129,35 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - # Send password reset emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_password_reset_mail, - next_link, - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.registration.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = await self.identity_handler.request_email_token( + self.hs.config.registration.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send password reset emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_password_reset_mail, + next_link, + ) + + # Wrap the session id in a JSON object + ret = {"sid": sid} threepid_send_requests.labels(type="email", reason="password_reset").observe( send_attempt ) - # Wrap the session id in a JSON object - return 200, {"sid": sid} + return 200, ret class PasswordRestServlet(RestServlet): @@ -333,7 +349,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.store = self.hs.get_datastores().main - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -342,10 +358,11 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.config.email.can_verify_email: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "Adding emails have been disabled due to lack of an email config" + ) raise SynapseError( 400, "Adding an email to your account is disabled on this server" ) @@ -396,20 +413,35 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_add_threepid_mail, - next_link, - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.registration.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = await self.identity_handler.request_email_token( + self.hs.config.registration.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send threepid validation emails from Synapse + sid = await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_add_threepid_mail, + next_link, + ) + + # Wrap the session id in a JSON 
object + ret = {"sid": sid} threepid_send_requests.labels(type="email", reason="add_threepid").observe( send_attempt ) - # Wrap the session id in a JSON object - return 200, {"sid": sid} + return 200, ret class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -502,19 +534,25 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self._failure_email_template = ( self.config.email.email_add_threepid_template_failure_html ) async def on_GET(self, request: Request) -> None: - if not self.config.email.can_verify_email: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "Adding emails have been disabled due to lack of an email config" + ) raise SynapseError( 400, "Adding an email to your account is disabled on this server" ) + elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + raise SynapseError( + 400, + "This homeserver is not validating threepids.", + ) sid = parse_string(request, "sid", required=True) token = parse_string(request, "token", required=True) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index a8402cdb3a..b7ab090bbd 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -31,6 +31,7 @@ from synapse.api.errors import ( ) from synapse.api.ratelimiting import Ratelimiter from synapse.config import ConfigError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.server import is_threepid_reserved @@ -73,7 +74,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): self.identity_handler = hs.get_identity_handler() self.config = hs.config - if self.hs.config.email.can_verify_email: + if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( hs=self.hs, app_name=self.config.email.email_app_name, @@ -82,10 +83,13 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.email.can_verify_email: - logger.warning( - "Email registration has been disabled due to lack of email config" - ) + if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if ( + self.hs.config.email.local_threepid_handling_disabled_due_to_email_config + ): + logger.warning( + "Email registration has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration has been disabled on this server" ) @@ -134,21 +138,35 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - # Send registration emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_registration_mail, - next_link, - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + assert self.hs.config.registration.account_threepid_delegate_email + + # Have the configured identity server handle the request + ret = await 
self.identity_handler.request_email_token( + self.hs.config.registration.account_threepid_delegate_email, + email, + client_secret, + send_attempt, + next_link, + ) + else: + # Send registration emails from Synapse, + # wrapping the session id in a JSON object. + ret = { + "sid": await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_registration_mail, + next_link, + ) + } threepid_send_requests.labels(type="email", reason="register").observe( send_attempt ) - # Wrap the session id in a JSON object - return 200, {"sid": sid} + return 200, ret class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -242,7 +260,7 @@ class RegistrationSubmitTokenServlet(RestServlet): self.clock = hs.get_clock() self.store = hs.get_datastores().main - if self.config.email.can_verify_email: + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self._failure_email_template = ( self.config.email.email_registration_template_failure_html ) @@ -252,10 +270,11 @@ class RegistrationSubmitTokenServlet(RestServlet): raise SynapseError( 400, "This medium is currently not supported for registration" ) - if not self.config.email.can_verify_email: - logger.warning( - "User registration via email has been disabled due to lack of email config" - ) + if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF: + if self.config.email.local_threepid_handling_disabled_due_to_email_config: + logger.warning( + "User registration via email has been disabled due to lack of email config" + ) raise SynapseError( 400, "Email-based registration is disabled on this server" ) diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index b9402cfb75..6ac9dbc7c9 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Tuple from twisted.web.server import Request from synapse.api.errors import ThreepidValidationError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.server import DirectServeHtmlResource from synapse.http.servlet import parse_string from synapse.util.stringutils import assert_valid_client_secret @@ -45,6 +46,9 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): self.clock = hs.get_clock() self.store = hs.get_datastores().main + self._local_threepid_handling_disabled_due_to_email_config = ( + hs.config.email.local_threepid_handling_disabled_due_to_email_config + ) self._confirmation_email_template = ( hs.config.email.email_password_reset_template_confirmation_html ) @@ -55,8 +59,8 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource): hs.config.email.email_password_reset_template_failure_html ) - # This resource should only be mounted if email validation is enabled - assert hs.config.email.can_verify_email + # This resource should not be mounted if threepid behaviour is not LOCAL + assert hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: sid = parse_string(request, "sid", required=True) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 071b488cc0..f8e64ce6ac 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -586,9 +586,9 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "require_at_registration": True, }, "account_threepid_delegates": { 
+ "email": "https://id_server", "msisdn": "https://id_server", }, - "email": {"notif_from": "Synapse "}, } ) def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: -- cgit 1.5.1 From 224d792dd7827fb53b9ed3b73a99f4acefb456ba Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 1 Aug 2022 13:53:56 +0100 Subject: Refactor `_resolve_state_at_missing_prevs` to return an `EventContext` (#13404) Previously, `_resolve_state_at_missing_prevs` returned the resolved state before an event and a partial state flag. These were unwieldy to carry around would only ever be used to build an event context. Build the event context directly instead. Signed-off-by: Sean Quah --- changelog.d/13404.misc | 1 + synapse/handlers/federation_event.py | 126 ++++++++++++----------------------- synapse/state/__init__.py | 8 +++ synapse/storage/controllers/state.py | 4 ++ tests/handlers/test_federation.py | 15 +++-- 5 files changed, 68 insertions(+), 86 deletions(-) create mode 100644 changelog.d/13404.misc (limited to 'tests') diff --git a/changelog.d/13404.misc b/changelog.d/13404.misc new file mode 100644 index 0000000000..655be4061b --- /dev/null +++ b/changelog.d/13404.misc @@ -0,0 +1 @@ +Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index bcc755a376..612e5aaa5b 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -23,7 +23,6 @@ from typing import ( Dict, Iterable, List, - Optional, Sequence, Set, Tuple, @@ -278,9 +277,8 @@ class FederationEventHandler: ) try: - await self._process_received_pdu( - origin, pdu, state_ids=None, partial_state=None - ) + context = await self._state_handler.compute_event_context(pdu) + await self._process_received_pdu(origin, pdu, context) except PartialStateConflictError: # The room was un-partial stated while we were processing the PDU. # Try once more, with full state this time. @@ -288,9 +286,8 @@ class FederationEventHandler: "Room %s was un-partial stated while processing the PDU, trying again.", room_id, ) - await self._process_received_pdu( - origin, pdu, state_ids=None, partial_state=None - ) + context = await self._state_handler.compute_event_context(pdu) + await self._process_received_pdu(origin, pdu, context) async def on_send_membership_event( self, origin: str, event: EventBase @@ -320,6 +317,7 @@ class FederationEventHandler: The event and context of the event after inserting it into the room graph. Raises: + RuntimeError if any prev_events are missing SynapseError if the event is not accepted into the room PartialStateConflictError if the room was un-partial stated in between computing the state at the event and persisting it. The caller should @@ -380,7 +378,7 @@ class FederationEventHandler: # need to. await self._event_creation_handler.cache_joined_hosts_for_event(event, context) - await self._check_for_soft_fail(event, None, origin=origin) + await self._check_for_soft_fail(event, context=context, origin=origin) await self._run_push_actions_and_persist_event(event, context) return event, context @@ -538,36 +536,10 @@ class FederationEventHandler: # # This is the same operation as we do when we receive a regular event # over federation. 
- state_ids, partial_state = await self._resolve_state_at_missing_prevs( + context = await self._compute_event_context_with_maybe_missing_prevs( destination, event ) - - # There are three possible cases for (state_ids, partial_state): - # * `state_ids` and `partial_state` are both `None` if we had all the - # prev_events. The prev_events may or may not have partial state and - # we won't know until we compute the event context. - # * `state_ids` is not `None` and `partial_state` is `False` if we were - # missing some prev_events (but we have full state for any we did - # have). We calculated the full state after the prev_events. - # * `state_ids` is not `None` and `partial_state` is `True` if we were - # missing some, but not all, prev_events. At least one of the - # prev_events we did have had partial state, so we calculated a partial - # state after the prev_events. - - context = None - if state_ids is not None and partial_state: - # the state after the prev events is still partial. We can't de-partial - # state the event, so don't bother building the event context. - pass - else: - # build a new state group for it if need be - context = await self._state_handler.compute_event_context( - event, - state_ids_before_event=state_ids, - partial_state=partial_state, - ) - - if context is None or context.partial_state: + if context.partial_state: # this can happen if some or all of the event's prev_events still have # partial state. We were careful to only pick events from the db without # partial-state prev events, so that implies that a prev event has @@ -840,26 +812,25 @@ class FederationEventHandler: try: try: - state_ids, partial_state = await self._resolve_state_at_missing_prevs( + context = await self._compute_event_context_with_maybe_missing_prevs( origin, event ) await self._process_received_pdu( origin, event, - state_ids=state_ids, - partial_state=partial_state, + context, backfilled=backfilled, ) except PartialStateConflictError: # The room was un-partial stated while we were processing the event. # Try once more, with full state this time. - state_ids, partial_state = await self._resolve_state_at_missing_prevs( + context = await self._compute_event_context_with_maybe_missing_prevs( origin, event ) # We ought to have full state now, barring some unlikely race where we left and # rejoned the room in the background. - if state_ids is not None and partial_state: + if context.partial_state: raise AssertionError( f"Event {event.event_id} still has a partial resolved state " f"after room {event.room_id} was un-partial stated" @@ -868,8 +839,7 @@ class FederationEventHandler: await self._process_received_pdu( origin, event, - state_ids=state_ids, - partial_state=partial_state, + context, backfilled=backfilled, ) except FederationError as e: @@ -878,15 +848,18 @@ class FederationEventHandler: else: raise - async def _resolve_state_at_missing_prevs( + async def _compute_event_context_with_maybe_missing_prevs( self, dest: str, event: EventBase - ) -> Tuple[Optional[StateMap[str]], Optional[bool]]: - """Calculate the state at an event with missing prev_events. + ) -> EventContext: + """Build an EventContext structure for a non-outlier event whose prev_events may + be missing. - This is used when we have pulled a batch of events from a remote server, and - still don't have all the prev_events. + This is used when we have pulled a batch of events from a remote server, and may + not have all the prev_events. - If we already have all the prev_events for `event`, this method does nothing. 
+ To build an EventContext, we need to calculate the state before the event. If we + already have all the prev_events for `event`, we can simply use the state after + the prev_events to calculate the state before `event`. Otherwise, the missing prevs become new backwards extremities, and we fall back to asking the remote server for the state after each missing `prev_event`, @@ -907,10 +880,7 @@ class FederationEventHandler: event: an event to check for missing prevs. Returns: - if we already had all the prev events, `None, None`. Otherwise, returns a - tuple containing: - * the event ids of the state at `event`. - * a boolean indicating whether the state may be partial. + The event context. Raises: FederationError if we fail to get the state from the remote server after any @@ -924,7 +894,7 @@ class FederationEventHandler: missing_prevs = prevs - seen if not missing_prevs: - return None, None + return await self._state_handler.compute_event_context(event) logger.info( "Event %s is missing prev_events %s: calculating state for a " @@ -990,7 +960,9 @@ class FederationEventHandler: "We can't get valid state history.", affected=event_id, ) - return state_map, partial_state + return await self._state_handler.compute_event_context( + event, state_ids_before_event=state_map, partial_state=partial_state + ) async def _get_state_ids_after_missing_prev_event( self, @@ -1159,8 +1131,7 @@ class FederationEventHandler: self, origin: str, event: EventBase, - state_ids: Optional[StateMap[str]], - partial_state: Optional[bool], + context: EventContext, backfilled: bool = False, ) -> None: """Called when we have a new non-outlier event. @@ -1182,32 +1153,18 @@ class FederationEventHandler: event: event to be persisted - state_ids: Normally None, but if we are handling a gap in the graph - (ie, we are missing one or more prev_events), the resolved state at the - event - - partial_state: - `True` if `state_ids` is partial and omits non-critical membership - events. - `False` if `state_ids` is the full state. - `None` if `state_ids` is not provided. In this case, the flag will be - calculated based on `event`'s prev events. + context: The `EventContext` to persist the event with. backfilled: True if this is part of a historical batch of events (inhibits notification to clients, and validation of device keys.) PartialStateConflictError: if the room was un-partial stated in between - computing the state at the event and persisting it. The caller should retry - exactly once in this case. + computing the state at the event and persisting it. The caller should + recompute `context` and retry exactly once when this happens. """ logger.debug("Processing event: %s", event) assert not event.internal_metadata.outlier - context = await self._state_handler.compute_event_context( - event, - state_ids_before_event=state_ids, - partial_state=partial_state, - ) try: await self._check_event_auth(origin, event, context) except AuthError as e: @@ -1219,7 +1176,7 @@ class FederationEventHandler: # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. 
- await self._check_for_soft_fail(event, state_ids, origin=origin) + await self._check_for_soft_fail(event, context=context, origin=origin) await self._run_push_actions_and_persist_event(event, context, backfilled) @@ -1782,7 +1739,7 @@ class FederationEventHandler: async def _check_for_soft_fail( self, event: EventBase, - state_ids: Optional[StateMap[str]], + context: EventContext, origin: str, ) -> None: """Checks if we should soft fail the event; if so, marks the event as @@ -1793,7 +1750,7 @@ class FederationEventHandler: Args: event - state_ids: The state at the event if we don't have all the event's prev events + context: The `EventContext` which we are about to persist the event with. origin: The host the event originates from. """ if await self._store.is_partial_state_room(event.room_id): @@ -1819,11 +1776,15 @@ class FederationEventHandler: auth_types = auth_types_for_event(room_version_obj, event) # Calculate the "current state". - if state_ids is not None: - # If we're explicitly given the state then we won't have all the - # prev events, and so we have a gap in the graph. In this case - # we want to be a little careful as we might have been down for - # a while and have an incorrect view of the current state, + seen_event_ids = await self._store.have_events_in_timeline(prev_event_ids) + has_missing_prevs = bool(prev_event_ids - seen_event_ids) + if has_missing_prevs: + # We don't have all the prev_events of this event, which means we have a + # gap in the graph, and the new event is going to become a new backwards + # extremity. + # + # In this case we want to be a little careful as we might have been + # down for a while and have an incorrect view of the current state, # however we still want to do checks as gaps are easy to # maliciously manufacture. # @@ -1836,6 +1797,7 @@ class FederationEventHandler: event.room_id, extrem_ids ) state_sets: List[StateMap[str]] = list(state_sets_d.values()) + state_ids = await context.get_prev_state_ids() state_sets.append(state_ids) current_state_ids = ( await self._state_resolution_handler.resolve_events_with_store( diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 69834de0de..c355e4f98a 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -278,6 +278,10 @@ class StateHandler: flag will be calculated based on `event`'s prev events. Returns: The event context. + + Raises: + RuntimeError if `state_ids_before_event` is not provided and one or more + prev events are missing or outliers. """ assert not event.internal_metadata.is_outlier() @@ -432,6 +436,10 @@ class StateHandler: Returns: The resolved state + + Raises: + RuntimeError if we don't have a state group for one or more of the events + (ie. they are outliers or unknown) """ logger.debug("resolve_state_groups event_ids %s", event_ids) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 20805c94fa..1e35046e07 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -338,6 +338,10 @@ class StateStorageController: event_ids: events to get state groups for await_full_state: if true, will block if we do not yet have complete state at these events. + + Raises: + RuntimeError if we don't have a state group for one or more of the events + (ie. 
they are outliers or unknown) """ if await_full_state: await self._partial_state_events_tracker.await_full_state(event_ids) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index fb06e5e812..aea96a0986 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -280,16 +280,23 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): # we poke this directly into _process_received_pdu, to avoid the # federation handler wanting to backfill the fake event. - self.get_success( - federation_event_handler._process_received_pdu( - self.OTHER_SERVER_NAME, + state_handler = self.hs.get_state_handler() + context = self.get_success( + state_handler.compute_event_context( event, - state_ids={ + state_ids_before_event={ (e.type, e.state_key): e.event_id for e in current_state }, partial_state=False, ) ) + self.get_success( + federation_event_handler._process_received_pdu( + self.OTHER_SERVER_NAME, + event, + context, + ) + ) # we should now have 8 backwards extremities. backwards_extremities = self.get_success( -- cgit 1.5.1 From e17e5c97e0d2be7a82c782e8ef9774e682968c7b Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 1 Aug 2022 16:45:39 +0000 Subject: Faster Room Joins: don't leave a stuck room partial state flag if the join fails. (#13403) --- changelog.d/13403.misc | 1 + synapse/handlers/federation.py | 32 +++++----- tests/handlers/test_federation.py | 122 +++++++++++++++++++++++++++++++++++++- 3 files changed, 140 insertions(+), 15 deletions(-) create mode 100644 changelog.d/13403.misc (limited to 'tests') diff --git a/changelog.d/13403.misc b/changelog.d/13403.misc new file mode 100644 index 0000000000..cb7b38153c --- /dev/null +++ b/changelog.d/13403.misc @@ -0,0 +1 @@ +Faster Room Joins: don't leave a stuck room partial state flag if the join fails. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3b5eaf5156..1cf6cb32e3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -546,9 +546,9 @@ class FederationHandler: ) if ret.partial_state: - # TODO(faster_joins): roll this back if we don't manage to start the - # background resync (eg process_remote_join fails) - # https://github.com/matrix-org/synapse/issues/12998 + # Mark the room as having partial state. + # The background process is responsible for unmarking this flag, + # even if the join fails. await self.store.store_partial_state_room(room_id, ret.servers_in_room) try: @@ -574,17 +574,21 @@ class FederationHandler: room_id, ) raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0) - - if ret.partial_state: - # Kick off the process of asynchronously fetching the state for this - # room. - run_as_background_process( - desc="sync_partial_state_room", - func=self._sync_partial_state_room, - initial_destination=origin, - other_destinations=ret.servers_in_room, - room_id=room_id, - ) + finally: + # Always kick off the background process that asynchronously fetches + # state for the room. + # If the join failed, the background process is responsible for + # cleaning up — including unmarking the room as a partial state room. + if ret.partial_state: + # Kick off the process of asynchronously fetching the state for this + # room. 
+ run_as_background_process( + desc="sync_partial_state_room", + func=self._sync_partial_state_room, + initial_destination=origin, + other_destinations=ret.servers_in_room, + room_id=room_id, + ) # We wait here until this instance has seen the events come down # replication (if we're using replication) as the below uses caches. diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index aea96a0986..745750b1d7 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -14,6 +14,7 @@ import logging from typing import cast from unittest import TestCase +from unittest.mock import Mock, patch from twisted.test.proto_helpers import MemoryReactor @@ -22,6 +23,7 @@ from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseErro from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.federation.federation_base import event_from_pdu_json +from synapse.federation.federation_client import SendJoinResult from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client import login, room @@ -30,7 +32,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest -from tests.test_utils import event_injection +from tests.test_utils import event_injection, make_awaitable logger = logging.getLogger(__name__) @@ -456,3 +458,121 @@ class EventFromPduTestCase(TestCase): }, RoomVersions.V6, ) + + +class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): + def test_failed_partial_join_is_clean(self) -> None: + """ + Tests that, when failing to partial-join a room, we don't get stuck with + a partial-state flag on a room. 
+ """ + + fed_handler = self.hs.get_federation_handler() + fed_client = fed_handler.federation_client + + room_id = "!room:example.com" + membership_event = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@alice:test", + "state_key": "@alice:test", + "content": {"membership": "join"}, + }, + RoomVersions.V10, + ) + + mock_make_membership_event = Mock( + return_value=make_awaitable( + ( + "example.com", + membership_event, + RoomVersions.V10, + ) + ) + ) + + EVENT_CREATE = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.create", + "sender": "@kristina:example.com", + "state_key": "", + "depth": 0, + "content": {"creator": "@kristina:example.com", "room_version": "10"}, + "auth_events": [], + "origin_server_ts": 1, + }, + room_version=RoomVersions.V10, + ) + EVENT_CREATOR_MEMBERSHIP = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@kristina:example.com", + "state_key": "@kristina:example.com", + "content": {"membership": "join"}, + "depth": 1, + "prev_events": [EVENT_CREATE.event_id], + "auth_events": [EVENT_CREATE.event_id], + "origin_server_ts": 1, + }, + room_version=RoomVersions.V10, + ) + EVENT_INVITATION_MEMBERSHIP = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@kristina:example.com", + "state_key": "@alice:test", + "content": {"membership": "invite"}, + "depth": 2, + "prev_events": [EVENT_CREATOR_MEMBERSHIP.event_id], + "auth_events": [ + EVENT_CREATE.event_id, + EVENT_CREATOR_MEMBERSHIP.event_id, + ], + "origin_server_ts": 1, + }, + room_version=RoomVersions.V10, + ) + mock_send_join = Mock( + return_value=make_awaitable( + SendJoinResult( + membership_event, + "example.com", + state=[ + EVENT_CREATE, + EVENT_CREATOR_MEMBERSHIP, + EVENT_INVITATION_MEMBERSHIP, + ], + auth_chain=[ + EVENT_CREATE, + EVENT_CREATOR_MEMBERSHIP, + EVENT_INVITATION_MEMBERSHIP, + ], + partial_state=True, + servers_in_room=["example.com"], + ) + ) + ) + + with patch.object( + fed_client, "make_membership_event", mock_make_membership_event + ), patch.object(fed_client, "send_join", mock_send_join): + # Join and check that our join event is rejected + # (The join event is rejected because it doesn't have any signatures) + join_exc = self.get_failure( + fed_handler.do_invite_join(["example.com"], room_id, "@alice:test", {}), + SynapseError, + ) + self.assertIn("Join event was rejected", str(join_exc)) + + store = self.hs.get_datastores().main + + # Check that we don't have a left-over partial_state entry. + self.assertFalse( + self.get_success(store.is_partial_state_room(room_id)), + f"Stale partial-stated room flag left over for {room_id} after a" + f" failed do_invite_join!", + ) -- cgit 1.5.1