From e6e136decca12648933f974e4151fb936ad9e1fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2019 18:04:46 +0100 Subject: Retry well-known lookups on failure. If we have recently seen a valid well-known for a domain, we want to retry on (non-final) errors a few times, to handle temporary blips in networking/etc. --- synapse/http/federation/matrix_federation_agent.py | 26 +++-- synapse/http/federation/well_known_resolver.py | 122 +++++++++++++++++---- .../federation/test_matrix_federation_agent.py | 79 +++++++------ 3 files changed, 160 insertions(+), 67 deletions(-) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 71a15f434d..64f62aaeec 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -51,9 +51,9 @@ class MatrixFederationAgent(object): SRVResolver impl to use for looking up SRV records. None to use a default implementation. - _well_known_cache (TTLCache|None): - TTLCache impl for storing cached well-known lookups. None to use a default - implementation. + _well_known_resolver (WellKnownResolver|None): + WellKnownResolver to use to perform well-known lookups. None to use a + default implementation. """ def __init__( self, reactor, tls_client_options_factory, _srv_resolver=None, - _well_known_cache=None, + _well_known_resolver=None, ): self._reactor = reactor self._clock = Clock(reactor) @@ -76,15 +76,17 @@ class MatrixFederationAgent(object): self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 - self._well_known_resolver = WellKnownResolver( - self._reactor, - agent=Agent( + if _well_known_resolver is None: + _well_known_resolver = WellKnownResolver( self._reactor, - pool=self._pool, - contextFactory=tls_client_options_factory, - ), - well_known_cache=_well_known_cache, - ) + agent=Agent( + self._reactor, + pool=self._pool, + contextFactory=tls_client_options_factory, + ), + ) + + self._well_known_resolver = _well_known_resolver @defer.inlineCallbacks def request(self, method, uri, headers=None, bodyProducer=None): diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index bb250c6922..d59864e298 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -38,6 +38,13 @@ WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60 # period to cache failure to fetch .well-known for WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 +# period to cache failure to fetch .well-known if there has recently been a +# valid well-known for that domain. +WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60 + +# period to remember there was a valid well-known after valid record expires +WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600 + # cap for .well-known cache period WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600 @@ -49,11 +56,16 @@ WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60 # we'll start trying to refetch 1 minute before it expires. WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2 +# Number of times we retry fetching a well-known for a domain we know recently +# had a valid entry. +WELL_KNOWN_RETRY_ATTEMPTS = 3 + logger = logging.getLogger(__name__) _well_known_cache = TTLCache("well-known") +_had_valid_well_known_cache = TTLCache("had-valid-well-known") @attr.s(slots=True, frozen=True) @@ -65,14 +77,20 @@ class WellKnownResolver(object): """Handles well-known lookups for matrix servers. 
""" - def __init__(self, reactor, agent, well_known_cache=None): + def __init__( + self, reactor, agent, well_known_cache=None, had_well_known_cache=None + ): self._reactor = reactor self._clock = Clock(reactor) if well_known_cache is None: well_known_cache = _well_known_cache + if had_well_known_cache is None: + had_well_known_cache = _had_valid_well_known_cache + self._well_known_cache = well_known_cache + self._had_valid_well_known_cache = had_well_known_cache self._well_known_agent = RedirectAgent(agent) @defer.inlineCallbacks @@ -100,7 +118,7 @@ class WellKnownResolver(object): # requests for the same server in parallel? try: with Measure(self._clock, "get_well_known"): - result, cache_period = yield self._do_get_well_known(server_name) + result, cache_period = yield self._fetch_well_known(server_name) except _FetchWellKnownFailure as e: if prev_result and e.temporary: @@ -111,10 +129,20 @@ class WellKnownResolver(object): result = None - # add some randomness to the TTL to avoid a stampeding herd every hour - # after startup - cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + if self._had_valid_well_known_cache.get(server_name, False): + # We have recently seen a valid well-known record for this + # server, so we cache the lack of well-known for a shorter time. + cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD + cache_period += random.uniform( + 0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER + ) + else: + # add some randomness to the TTL to avoid a stampeding herd every hour + # after startup + cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD + cache_period += random.uniform( + 0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER + ) if cache_period > 0: self._well_known_cache.set(server_name, result, cache_period) @@ -122,7 +150,7 @@ class WellKnownResolver(object): return WellKnownLookupResult(delegated_server=result) @defer.inlineCallbacks - def _do_get_well_known(self, server_name): + def _fetch_well_known(self, server_name): """Actually fetch and parse a .well-known, without checking the cache Args: @@ -134,24 +162,17 @@ class WellKnownResolver(object): Returns: Deferred[Tuple[bytes,int]]: The lookup result and cache period. """ - uri = b"https://%s/.well-known/matrix/server" % (server_name,) - uri_str = uri.decode("ascii") - logger.info("Fetching %s", uri_str) + + had_valid_well_known = bool( + self._had_valid_well_known_cache.get(server_name, False) + ) # We do this in two steps to differentiate between possibly transient # errors (e.g. can't connect to host, 503 response) and more permenant # errors (such as getting a 404 response). 
- try: - response = yield make_deferred_yieldable( - self._well_known_agent.request(b"GET", uri) - ) - body = yield make_deferred_yieldable(readBody(response)) - - if 500 <= response.code < 600: - raise Exception("Non-200 response %s" % (response.code,)) - except Exception as e: - logger.info("Error fetching %s: %s", uri_str, e) - raise _FetchWellKnownFailure(temporary=True) + response, body = yield self._make_well_known_request( + server_name, retry=had_valid_well_known + ) try: if response.code != 200: raise Exception("Non-200 response %s" % (response.code,)) parsed_body = json.loads(body.decode("utf-8")) logger.info("Response from .well-known: %s", parsed_body) result = parsed_body["m.server"].encode("ascii") + except defer.CancelledError: + # Bail if we've been cancelled + raise except Exception as e: - logger.info("Error fetching %s: %s", uri_str, e) + logger.info("Error parsing well-known for %s: %s", server_name, e) raise _FetchWellKnownFailure(temporary=False) cache_period = _cache_period_from_headers( @@ -177,8 +201,62 @@ cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) + # We got a success, mark as such in the cache + self._had_valid_well_known_cache.set( + server_name, + bool(result), + cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID, + ) + return (result, cache_period) + @defer.inlineCallbacks + def _make_well_known_request(self, server_name, retry): + """Make the well-known request. + + This will retry the request if requested and it fails (e.g. we are + unable to connect, or receive a 5xx error). + + Args: + server_name (bytes) + retry (bool): Whether to retry the request if it fails. + + Returns: + Deferred[tuple[IResponse, bytes]] Returns the response object and + body. Response may be a non-200 response. + """ + uri = b"https://%s/.well-known/matrix/server" % (server_name,) + uri_str = uri.decode("ascii") + + i = 0 + while True: + i += 1 + + logger.info("Fetching %s", uri_str) + try: + response = yield make_deferred_yieldable( + self._well_known_agent.request(b"GET", uri) + ) + body = yield make_deferred_yieldable(readBody(response)) + + if 500 <= response.code < 600: + raise Exception("Non-200 response %s" % (response.code,)) + + return response, body + except defer.CancelledError: + # Bail if we've been cancelled + raise + except Exception as e: + logger.info("Retry: %s", retry) + if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS: + logger.info("Error fetching %s: %s", uri_str, e) + raise _FetchWellKnownFailure(temporary=True) + + logger.info("Error fetching %s: %s. 
Retrying", uri_str, e) + + # Sleep briefly in the hopes that they come back up + yield self._clock.sleep(0.5) + def _cache_period_from_headers(headers, time_now=time.time): cache_controls = _parse_cache_control(headers) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 2c568788b3..4d3f31d18c 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -73,8 +73,6 @@ class MatrixFederationAgentTests(TestCase): self.mock_resolver = Mock() - self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) - config_dict = default_config("test", parse=False) config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()] @@ -82,11 +80,21 @@ class MatrixFederationAgentTests(TestCase): config.parse_config_dict(config_dict, "", "") self.tls_factory = ClientTLSOptionsFactory(config) + + self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) + self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) + self.well_known_resolver = WellKnownResolver( + self.reactor, + Agent(self.reactor, contextFactory=self.tls_factory), + well_known_cache=self.well_known_cache, + had_well_known_cache=self.had_well_known_cache, + ) + self.agent = MatrixFederationAgent( reactor=self.reactor, tls_client_options_factory=self.tls_factory, _srv_resolver=self.mock_resolver, - _well_known_cache=self.well_known_cache, + _well_known_resolver=self.well_known_resolver, ) def _make_connection(self, client_factory, expected_sni): @@ -701,11 +709,18 @@ class MatrixFederationAgentTests(TestCase): config = default_config("test", parse=True) + # Build a new agent and WellKnownResolver with a different tls factory + tls_factory = ClientTLSOptionsFactory(config) agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(config), + tls_client_options_factory=tls_factory, _srv_resolver=self.mock_resolver, - _well_known_cache=self.well_known_cache, + _well_known_resolver=WellKnownResolver( + self.reactor, + Agent(self.reactor, contextFactory=tls_factory), + well_known_cache=self.well_known_cache, + had_well_known_cache=self.had_well_known_cache, + ), ) test_d = agent.request(b"GET", b"matrix://testserv/foo/bar") @@ -932,15 +947,9 @@ class MatrixFederationAgentTests(TestCase): self.successResultOf(test_d) def test_well_known_cache(self): - well_known_resolver = WellKnownResolver( - self.reactor, - Agent(self.reactor, contextFactory=self.tls_factory), - well_known_cache=self.well_known_cache, - ) - self.reactor.lookups["testserv"] = "1.2.3.4" - fetch_d = well_known_resolver.get_well_known(b"testserv") + fetch_d = self.well_known_resolver.get_well_known(b"testserv") # there should be an attempt to connect on port 443 for the .well-known clients = self.reactor.tcpClients @@ -963,7 +972,7 @@ class MatrixFederationAgentTests(TestCase): well_known_server.loseConnection() # repeat the request: it should hit the cache - fetch_d = well_known_resolver.get_well_known(b"testserv") + fetch_d = self.well_known_resolver.get_well_known(b"testserv") r = self.successResultOf(fetch_d) self.assertEqual(r.delegated_server, b"target-server") @@ -971,7 +980,7 @@ class MatrixFederationAgentTests(TestCase): self.reactor.pump((1000.0,)) # now it should connect again - fetch_d = well_known_resolver.get_well_known(b"testserv") + fetch_d = self.well_known_resolver.get_well_known(b"testserv") self.assertEqual(len(clients), 
1) (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) @@ -992,15 +1001,9 @@ class MatrixFederationAgentTests(TestCase): it ignores transient errors. """ - well_known_resolver = WellKnownResolver( - self.reactor, - Agent(self.reactor, contextFactory=self.tls_factory), - well_known_cache=self.well_known_cache, - ) - self.reactor.lookups["testserv"] = "1.2.3.4" - fetch_d = well_known_resolver.get_well_known(b"testserv") + fetch_d = self.well_known_resolver.get_well_known(b"testserv") # there should be an attempt to connect on port 443 for the .well-known clients = self.reactor.tcpClients @@ -1026,27 +1029,37 @@ class MatrixFederationAgentTests(TestCase): # another lookup. self.reactor.pump((900.0,)) - fetch_d = well_known_resolver.get_well_known(b"testserv") - clients = self.reactor.tcpClients - (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + fetch_d = self.well_known_resolver.get_well_known(b"testserv") - # fonx the connection attempt, this will be treated as a temporary - # failure. - client_factory.clientConnectionFailed(None, Exception("nope")) + # The resolver may retry a few times, so fonx all requests that come along + attempts = 0 + while self.reactor.tcpClients: + clients = self.reactor.tcpClients + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) - # attemptdelay on the hostnameendpoint is 0.3, so takes that long before the - # .well-known request fails. - self.reactor.pump((0.4,)) + attempts += 1 + + # fonx the connection attempt, this will be treated as a temporary + # failure. + client_factory.clientConnectionFailed(None, Exception("nope")) + + # There are a few sleeps involved, so we have to pump the reactor a + # bit. + self.reactor.pump((1.0, 1.0)) + + # We expect to see more than one attempt as there was previously a valid + # well-known. + self.assertGreater(attempts, 1) # Resolver should return cached value, despite the lookup failing. r = self.successResultOf(fetch_d) self.assertEqual(r.delegated_server, b"target-server") - # Expire the cache and repeat the request - self.reactor.pump((100.0,)) + # Expire both caches and repeat the request + self.reactor.pump((10000.0,)) # Repeat the request; this time it should fail if the lookup fails. - fetch_d = well_known_resolver.get_well_known(b"testserv") + fetch_d = self.well_known_resolver.get_well_known(b"testserv") clients = self.reactor.tcpClients (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) -- cgit 1.4.1 From 1771f0045d035b8057ba8766ebd5deab230725d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Aug 2019 10:54:26 +0100 Subject: Newsfile --- changelog.d/5850.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5850.misc diff --git a/changelog.d/5850.misc b/changelog.d/5850.misc new file mode 100644 index 0000000000..c4f879ca2f --- /dev/null +++ b/changelog.d/5850.misc @@ -0,0 +1 @@ +Retry well-known lookups if we have recently seen a valid well-known record for the server. 
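Taken together, the patch above makes two behavioural changes: failures for a domain that recently had a valid well-known are cached for a much shorter period, and the fetch itself is retried up to three times. The following sketch restates that logic synchronously for illustration; it is not part of the patch. The constants are copied from well_known_resolver.py, fetch is a hypothetical callable standing in for the Twisted agent request, and the additive jitter matches this commit (a later commit in this series switches it to a multiplicative factor).

    import random
    import time

    WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60
    WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
    WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
    WELL_KNOWN_RETRY_ATTEMPTS = 3

    def negative_cache_period(had_valid_well_known):
        # Domains that recently served a valid well-known get a short
        # negative-cache period, so a brief outage heals quickly.
        if had_valid_well_known:
            period = WELL_KNOWN_DOWN_CACHE_PERIOD
        else:
            period = WELL_KNOWN_INVALID_CACHE_PERIOD
        # add jitter to avoid a stampeding herd of refetches
        return period + random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)

    def fetch_with_retry(fetch, retry):
        # fetch() is a stand-in for the agent request; retries only happen
        # when `retry` is set, i.e. the domain recently had a valid record.
        for i in range(1, WELL_KNOWN_RETRY_ATTEMPTS + 1):
            try:
                return fetch()
            except Exception:
                if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
                    raise
                time.sleep(0.5)  # mirrors clock.sleep(0.5) in the patch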
-- cgit 1.4.1 From 6fadb560fcdc92466b0b5cac02551cb41ca9f148 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 14 Aug 2019 13:30:36 +0100 Subject: Support MSC2197 outbound with unstable prefix Signed-off-by: Olivier Wilkinson (reivilibre) --- synapse/federation/transport/client.py | 46 +++++++++++++++++++++++----------- synapse/handlers/room_list.py | 29 ++++++++++++++++++++- 2 files changed, 59 insertions(+), 16 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 0cea0d2a10..2e99f77eb1 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -327,21 +327,37 @@ class TransportLayerClient(object): include_all_networks=False, third_party_instance_id=None, ): - path = _create_v1_path("/publicRooms") - - args = {"include_all_networks": "true" if include_all_networks else "false"} - if third_party_instance_id: - args["third_party_instance_id"] = (third_party_instance_id,) - if limit: - args["limit"] = [str(limit)] - if since_token: - args["since"] = [since_token] - - # TODO(erikj): Actually send the search_filter across federation. - - response = yield self.client.get_json( - destination=remote_server, path=path, args=args, ignore_backoff=True - ) + if search_filter: + # TODO(MSC2197): Move to V1 prefix + path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/publicRooms") + + data = {"include_all_networks": "true" if include_all_networks else "false"} + if third_party_instance_id: + data["third_party_instance_id"] = third_party_instance_id + if limit: + data["limit"] = str(limit) + if since_token: + data["since"] = since_token + + data["filter"] = search_filter + + response = yield self.client.post_json( + destination=remote_server, path=path, data=data, ignore_backoff=True + ) + else: + path = _create_v1_path("/publicRooms") + + args = {"include_all_networks": "true" if include_all_networks else "false"} + if third_party_instance_id: + args["third_party_instance_id"] = (third_party_instance_id,) + if limit: + args["limit"] = [str(limit)] + if since_token: + args["since"] = [since_token] + + response = yield self.client.get_json( + destination=remote_server, path=path, args=args, ignore_backoff=True + ) return response diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index e9094ad02b..a7e55f00e5 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64, encode_base64 from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules +from synapse.api.errors import Codes, HttpResponseException from synapse.types import ThirdPartyInstanceID from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.descriptors import cachedInlineCallbacks @@ -485,7 +486,33 @@ class RoomListHandler(BaseHandler): return {"chunk": [], "total_room_count_estimate": 0} if search_filter: - # We currently don't support searching across federation, so we have + # Searching across federation is defined in MSC2197. + # However, the remote homeserver may or may not actually support it. + # So we first try an MSC2197 remote-filtered search, then fall back + # to a locally-filtered search if we must. 
+ + try: + res = yield self._get_remote_list_cached( + server_name, + limit=limit, + since_token=since_token, + include_all_networks=include_all_networks, + third_party_instance_id=third_party_instance_id, + search_filter=search_filter, + ) + return res + except HttpResponseException as hre: + syn_err = hre.to_synapse_error() + if hre.code in (404, 405) or syn_err.errcode in ( + Codes.UNRECOGNIZED, + Codes.NOT_FOUND, + ): + logger.debug("Falling back to locally-filtered /publicRooms") + else: + raise # Not an error that should trigger a fallback. + + # if we reach this point, then we fall back to the situation where + # we currently don't support searching across federation, so we have # to do it manually without pagination limit = None since_token = None -- cgit 1.4.1 From 2253b083d9b3cc0aba89aba98298214cf960acd7 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Thu, 15 Aug 2019 11:06:21 +0100 Subject: Add support for inbound MSC2197 requests on unstable Federation API Signed-off-by: Olivier Wilkinson (reivilibre) --- synapse/federation/transport/server.py | 60 +++++++++++++++++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ea4e1b6d0f..e17555c4cf 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -751,6 +751,64 @@ class PublicRoomList(BaseFederationServlet): return 200, data +class UnstablePublicRoomList(BaseFederationServlet): + """ + Fetch the public room list for this server. + + This API returns information in the same format as /publicRooms on the + client API, but will only ever include local public rooms and hence is + intended for consumption by other home servers. + + This is the unstable-prefixed version which adds support for MSC2197, which + is still undergoing review. 
+ """ + + PATH = "/publicRooms" + PREFIX = FEDERATION_UNSTABLE_PREFIX + + def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access): + super(UnstablePublicRoomList, self).__init__( + handler, authenticator, ratelimiter, server_name + ) + self.allow_access = allow_access + + # TODO(MSC2197): Move away from Unstable prefix and back to normal prefix + async def on_POST(self, origin, content, query): + if not self.allow_access: + raise FederationDeniedError(origin) + + limit = int(content.get("limit", 100)) + since_token = content.get("since", None) + search_filter = content.get("filter", None) + + include_all_networks = content.get("include_all_networks", False) + third_party_instance_id = content.get("third_party_instance_id", None) + + if include_all_networks: + network_tuple = None + if third_party_instance_id is not None: + raise SynapseError( + 400, "Can't use include_all_networks with an explicit network" + ) + elif third_party_instance_id is None: + network_tuple = ThirdPartyInstanceID(None, None) + else: + network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id) + + if search_filter is None: + logger.warning("Nonefilter") + + data = await self.handler.get_local_public_room_list( + limit=limit, + since_token=since_token, + search_filter=search_filter, + network_tuple=network_tuple, + from_federation=True, + ) + + return 200, data + + class FederationVersionServlet(BaseFederationServlet): PATH = "/version" @@ -1315,7 +1373,7 @@ FEDERATION_SERVLET_CLASSES = ( OPENID_SERVLET_CLASSES = (OpenIdUserInfo,) -ROOM_LIST_CLASSES = (PublicRoomList,) +ROOM_LIST_CLASSES = (PublicRoomList, UnstablePublicRoomList) GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsProfileServlet, -- cgit 1.4.1 From a3df04a899f9feccf145e167e7ddc5228fa927e2 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Thu, 15 Aug 2019 11:08:54 +0100 Subject: Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/5859.feature | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changelog.d/5859.feature diff --git a/changelog.d/5859.feature b/changelog.d/5859.feature new file mode 100644 index 0000000000..c897c66037 --- /dev/null +++ b/changelog.d/5859.feature @@ -0,0 +1,2 @@ +Add unstable support for MSC2197 (filtered search requests over federation), in +order to allow upcoming room directory query performance improvements. -- cgit 1.4.1 From f299c5414c2dd300103b0e11e7114123d8eb58a1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Aug 2019 15:30:04 +0100 Subject: Refactor MatrixFederationAgent to retry SRV. This refactors MatrixFederationAgent to move the SRV lookup into the endpoint code, this has two benefits: 1. Its easier to retry different host/ports in the same way as HostnameEndpoint. 2. We avoid SRV lookups if we have a free connection in the pool --- synapse/http/federation/matrix_federation_agent.py | 356 ++++++++++----------- synapse/http/federation/srv_resolver.py | 35 +- .../federation/test_matrix_federation_agent.py | 63 +++- tests/http/federation/test_srv_resolver.py | 8 +- 4 files changed, 268 insertions(+), 194 deletions(-) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 71a15f434d..c208185791 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -14,21 +14,21 @@ # limitations under the License. 
import logging +import urllib -import attr -from netaddr import IPAddress +from netaddr import AddrFormatError, IPAddress from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.internet.interfaces import IStreamClientEndpoint -from twisted.web.client import URI, Agent, HTTPConnectionPool +from twisted.web.client import Agent, HTTPConnectionPool from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent +from twisted.web.iweb import IAgent, IAgentEndpointFactory -from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list +from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.http.federation.well_known_resolver import WellKnownResolver -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.util import Clock logger = logging.getLogger(__name__) @@ -36,8 +36,9 @@ logger = logging.getLogger(__name__) @implementer(IAgent) class MatrixFederationAgent(object): - """An Agent-like thing which provides a `request` method which will look up a matrix - server and send an HTTP request to it. + """An Agent-like thing which provides a `request` method which correctly + handles resolving matrix server names when using matrix://. Handles standard + https URIs as normal. Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.) @@ -65,23 +66,25 @@ class MatrixFederationAgent(object): ): self._reactor = reactor self._clock = Clock(reactor) - - self._tls_client_options_factory = tls_client_options_factory - if _srv_resolver is None: - _srv_resolver = SrvResolver() - self._srv_resolver = _srv_resolver - self._pool = HTTPConnectionPool(reactor) self._pool.retryAutomatically = False self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 + self._agent = Agent.usingEndpointFactory( + self._reactor, + MatrixHostnameEndpointFactory( + reactor, tls_client_options_factory, _srv_resolver + ), + pool=self._pool, + ) + self._well_known_resolver = WellKnownResolver( self._reactor, agent=Agent( self._reactor, - pool=self._pool, contextFactory=tls_client_options_factory, + pool=self._pool, ), well_known_cache=_well_known_cache, ) @@ -91,19 +94,15 @@ class MatrixFederationAgent(object): """ Args: method (bytes): HTTP method: GET/POST/etc - uri (bytes): Absolute URI to be retrieved - headers (twisted.web.http_headers.Headers|None): HTTP headers to send with the request, or None to send no extra headers. - bodyProducer (twisted.web.iweb.IBodyProducer|None): An object which can generate bytes to make up the body of this request (for example, the properly encoded contents of a file for a file upload). Or None if the request is to have no body. - Returns: Deferred[twisted.web.iweb.IResponse]: fires when the header of the response has been received (regardless of the @@ -111,210 +110,195 @@ class MatrixFederationAgent(object): response from being received (including problems that prevent the request from being sent). """ - parsed_uri = URI.fromBytes(uri, defaultPort=-1) - res = yield self._route_matrix_uri(parsed_uri) + # We use urlparse as that will set `port` to None if there is no + # explicit port. + parsed_uri = urllib.parse.urlparse(uri) - # set up the TLS connection params + # If this is a matrix:// URI check if the server has delegated matrix + # traffic using well-known delegation. 
# - # XXX disabling TLS is really only supported here for the benefit of the - # unit tests. We should make the UTs cope with TLS rather than having to make - # the code support the unit tests. - if self._tls_client_options_factory is None: - tls_options = None - else: - tls_options = self._tls_client_options_factory.get_options( - res.tls_server_name.decode("ascii") + # We have to do this here and not in the endpoint as we need to rewrite + # the host header with the delegated server name. + delegated_server = None + if ( + parsed_uri.scheme == b"matrix" + and not _is_ip_literal(parsed_uri.hostname) + and not parsed_uri.port + ): + well_known_result = yield self._well_known_resolver.get_well_known( + parsed_uri.hostname + ) + delegated_server = well_known_result.delegated_server + + if delegated_server: + # Ok, the server has delegated matrix traffic to somewhere else, so + # let's rewrite the URL to replace the server with the delegated + # server name. + uri = urllib.parse.urlunparse( + ( + parsed_uri.scheme, + delegated_server, + parsed_uri.path, + parsed_uri.params, + parsed_uri.query, + parsed_uri.fragment, + ) ) + parsed_uri = urllib.parse.urlparse(uri) - # make sure that the Host header is set correctly + # We need to make sure the host header is set to the netloc of the + # server. if headers is None: headers = Headers() else: headers = headers.copy() if not headers.hasHeader(b"host"): - headers.addRawHeader(b"host", res.host_header) + headers.addRawHeader(b"host", parsed_uri.netloc) - class EndpointFactory(object): - @staticmethod - def endpointForURI(_uri): - ep = LoggingHostnameEndpoint( - self._reactor, res.target_host, res.target_port - ) - if tls_options is not None: - ep = wrapClientTLS(tls_options, ep) - return ep + with PreserveLoggingContext(): + res = yield self._agent.request(method, uri, headers, bodyProducer) - agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool) - res = yield make_deferred_yieldable( - agent.request(method, uri, headers, bodyProducer) - ) return res - @defer.inlineCallbacks - def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): - """Helper for `request`: determine the routing for a Matrix URI Args: - parsed_uri (twisted.web.client.URI): uri to route. Note that it should be - parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1 - if there is no explicit port given. + @implementer(IAgentEndpointFactory) class MatrixHostnameEndpointFactory(object): """Factory for MatrixHostnameEndpoint for passing to an Agent. 
+ def __init__(self, reactor, tls_client_options_factory, srv_resolver): + self._reactor = reactor + self._tls_client_options_factory = tls_client_options_factory - Returns: - Deferred[_RoutingResult] - """ - # check for an IP literal - try: - ip_address = IPAddress(parsed_uri.host.decode("ascii")) - except Exception: - # not an IP address - ip_address = None - - if ip_address: - port = parsed_uri.port - if port == -1: - port = 8448 - return _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=port, - ) + if srv_resolver is None: + srv_resolver = SrvResolver() - if parsed_uri.port != -1: - # there is an explicit port - return _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=parsed_uri.port, - ) + self._srv_resolver = srv_resolver - if lookup_well_known: - # try a .well-known lookup - well_known_result = yield self._well_known_resolver.get_well_known( - parsed_uri.host - ) - well_known_server = well_known_result.delegated_server - - if well_known_server: - # if we found a .well-known, start again, but don't do another - # .well-known lookup. - - # parse the server name in the .well-known response into host/port. - # (This code is lifted from twisted.web.client.URI.fromBytes). - if b":" in well_known_server: - well_known_host, well_known_port = well_known_server.rsplit(b":", 1) - try: - well_known_port = int(well_known_port) - except ValueError: - # the part after the colon could not be parsed as an int - # - we assume it is an IPv6 literal with no port (the closing - # ']' stops it being parsed as an int) - well_known_host, well_known_port = well_known_server, -1 - else: - well_known_host, well_known_port = well_known_server, -1 - - new_uri = URI( - scheme=parsed_uri.scheme, - netloc=well_known_server, - host=well_known_host, - port=well_known_port, - path=parsed_uri.path, - params=parsed_uri.params, - query=parsed_uri.query, - fragment=parsed_uri.fragment, - ) + def endpointForURI(self, parsed_uri): + return MatrixHostnameEndpoint( + self._reactor, + self._tls_client_options_factory, + self._srv_resolver, + parsed_uri, + ) - res = yield self._route_matrix_uri(new_uri, lookup_well_known=False) - return res - - # try a SRV lookup - service_name = b"_matrix._tcp.%s" % (parsed_uri.host,) - server_list = yield self._srv_resolver.resolve_service(service_name) - - if not server_list: - target_host = parsed_uri.host - port = 8448 - logger.debug( - "No SRV record for %s, using %s:%i", - parsed_uri.host.decode("ascii"), - target_host.decode("ascii"), - port, - ) + +@implementer(IStreamClientEndpoint) +class MatrixHostnameEndpoint(object): + """An endpoint that resolves matrix:// URLs using Matrix server name + resolution (i.e. via SRV). Does not check for well-known delegation. + """ + + def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri): + self._reactor = reactor + + # We reparse the URI so that defaultPort is -1 rather than 80 + self._parsed_uri = parsed_uri + + # set up the TLS connection params + # + # XXX disabling TLS is really only supported here for the benefit of the + # unit tests. We should make the UTs cope with TLS rather than having to make + # the code support the unit tests. 
+ + if tls_client_options_factory is None: + self._tls_options = None else: - target_host, port = pick_server_from_list(server_list) - logger.debug( - "Picked %s:%i from SRV records for %s", - target_host.decode("ascii"), - port, - parsed_uri.host.decode("ascii"), + self._tls_options = tls_client_options_factory.get_options( + self._parsed_uri.host.decode("ascii") ) - return _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=target_host, - target_port=port, - ) + self._srv_resolver = srv_resolver + @defer.inlineCallbacks + def connect(self, protocol_factory): + """Implements IStreamClientEndpoint interface + """ -@implementer(IStreamClientEndpoint) -class LoggingHostnameEndpoint(object): - """A wrapper for HostnameEndpint which logs when it connects""" + first_exception = None - def __init__(self, reactor, host, port, *args, **kwargs): - self.host = host - self.port = port - self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs) + server_list = yield self._resolve_server() - def connect(self, protocol_factory): - logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port) - return self.ep.connect(protocol_factory) + for server in server_list: + host = server.host + port = server.port + try: + logger.info("Connecting to %s:%i", host.decode("ascii"), port) + endpoint = HostnameEndpoint(self._reactor, host, port) + if self._tls_options: + endpoint = wrapClientTLS(self._tls_options, endpoint) + result = yield make_deferred_yieldable( + endpoint.connect(protocol_factory) + ) -@attr.s -class _RoutingResult(object): - """The result returned by `_route_matrix_uri`. + return result + except Exception as e: + logger.info( + "Failed to connect to %s:%i: %s", host.decode("ascii"), port, e + ) + if not first_exception: + first_exception = e - Contains the parameters needed to direct a federation connection to a particular - server. + # We return the first failure because that's probably the most interesting. + if first_exception: + raise first_exception - Where a SRV record points to several servers, this object contains a single server - chosen from the list. - """ + # This shouldn't happen as we should always have at least one host/port + # to try and if that doesn't work then we'll have an exception. + raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,)) - host_header = attr.ib() - """ - The value we should assign to the Host header (host:port from the matrix - URI, or .well-known). + @defer.inlineCallbacks + def _resolve_server(self): + """Resolves the server name to a list of hosts and ports to attempt to + connect to. - :type: bytes - """ + Returns: + Deferred[list[Server]] + """ - tls_server_name = attr.ib() - """ - The server name we should set in the SNI (typically host, without port, from the - matrix URI or .well-known) + if self._parsed_uri.scheme != b"matrix": + return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)] - :type: bytes - """ + # Note: We don't do well-known lookup as that needs to have happened + # before now, due to needing to rewrite the Host header of the HTTP + # request. 
- target_host = attr.ib() - """ - The hostname (or IP literal) we should route the TCP connection to (the target of the - SRV record, or the hostname from the URL/.well-known) + parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes()) - :type: bytes - """ + host = parsed_uri.hostname + port = parsed_uri.port - target_port = attr.ib() - """ - The port we should route the TCP connection to (the target of the SRV record, or - the port from the URL/.well-known, or 8448) + # If there is an explicit port or the host is an IP address we bypass + # SRV lookups and just use the given host/port. + if port or _is_ip_literal(host): + return [Server(host, port or 8448)] - :type: int + server_list = yield self._srv_resolver.resolve_service(b"_matrix._tcp." + host) + + if server_list: + return server_list + + # No SRV records, so we fallback to host and 8448 + return [Server(host, 8448)] + + +def _is_ip_literal(host): + """Test if the given host name is either an IPv4 or IPv6 literal. + + Args: + host (bytes) + + Returns: + bool """ + + host = host.decode("ascii") + + try: + IPAddress(host) + return True + except AddrFormatError: + return False diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index b32188766d..bbda0a23f4 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) SERVER_CACHE = {} -@attr.s +@attr.s(slots=True, frozen=True) class Server(object): """ Our record of an individual server which can be tried to reach a destination. @@ -83,6 +83,35 @@ def pick_server_from_list(server_list): raise RuntimeError("pick_server_from_list got to end of eligible server list.") +def _sort_server_list(server_list): + """Given a list of SRV records sort them into priority order and shuffle + each priority with the given weight. + """ + priority_map = {} + + for server in server_list: + priority_map.setdefault(server.priority, []).append(server) + + results = [] + for priority in sorted(priority_map): + servers = priority_map.pop(priority) + + while servers: + total_weight = sum(s.weight for s in servers) + target_weight = random.randint(0, total_weight) + + for s in servers: + target_weight -= s.weight + + if target_weight <= 0: + break + + results.append(s) + servers.remove(s) + + return results + + class SrvResolver(object): """Interface to the dns client to do SRV lookups, with result caching. 
@@ -120,7 +149,7 @@ class SrvResolver(object): if cache_entry: if all(s.expires > now for s in cache_entry): servers = list(cache_entry) - return servers + return _sort_server_list(servers) try: answers, _, _ = yield make_deferred_yieldable( @@ -169,4 +198,4 @@ class SrvResolver(object): ) self._cache[service_name] = list(servers) - return servers + return _sort_server_list(servers) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 2c568788b3..f97c8a59f6 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -41,9 +41,9 @@ from synapse.http.federation.well_known_resolver import ( from synapse.logging.context import LoggingContext from synapse.util.caches.ttlcache import TTLCache +from tests import unittest from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.server import FakeTransport, ThreadedMemoryReactorClock -from tests.unittest import TestCase from tests.utils import default_config logger = logging.getLogger(__name__) @@ -67,7 +67,8 @@ def get_connection_factory(): return test_server_connection_factory -class MatrixFederationAgentTests(TestCase): +@unittest.DEBUG +class MatrixFederationAgentTests(unittest.TestCase): def setUp(self): self.reactor = ThreadedMemoryReactorClock() @@ -1056,8 +1057,64 @@ class MatrixFederationAgentTests(TestCase): r = self.successResultOf(fetch_d) self.assertEqual(r.delegated_server, None) + def test_srv_fallbacks(self): + """Test that other SRV results are tried if the first one fails. + """ + + self.mock_resolver.resolve_service.side_effect = lambda _: [ + Server(host=b"target.com", port=8443), + Server(host=b"target.com", port=8444), + ] + self.reactor.lookups["target.com"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix._tcp.testserv" + ) + + # We should see an attempt to connect to the first server + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # Fonx the connection + client_factory.clientConnectionFailed(None, Exception("nope")) + + # There's a 300ms delay in HostnameEndpoint + self.reactor.pump((0.4,)) + + # Hasn't failed yet + self.assertNoResult(test_d) + + # We should now see an attempt to connect to the second server + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8444) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=b"testserv") + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"]) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + -class TestCachePeriodFromHeaders(TestCase): +class TestCachePeriodFromHeaders(unittest.TestCase): def test_cache_control(self): # uppercase self.assertEqual( diff --git a/tests/http/federation/test_srv_resolver.py 
b/tests/http/federation/test_srv_resolver.py index 3b885ef64b..df034ab237 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -83,8 +83,10 @@ class SrvResolverTestCase(unittest.TestCase): service_name = b"test_service.example.com" - entry = Mock(spec_set=["expires"]) + entry = Mock(spec_set=["expires", "priority", "weight"]) entry.expires = 0 + entry.priority = 0 + entry.weight = 0 cache = {service_name: [entry]} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) @@ -105,8 +107,10 @@ class SrvResolverTestCase(unittest.TestCase): service_name = b"test_service.example.com" - entry = Mock(spec_set=["expires"]) + entry = Mock(spec_set=["expires", "priority", "weight"]) entry.expires = 999999999 + entry.priority = 0 + entry.weight = 0 cache = {service_name: [entry]} resolver = SrvResolver( -- cgit 1.4.1 From c03e3e83010d0147515e3771353af6b89bf8cf03 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 15 Aug 2019 15:33:22 +0100 Subject: Newsfile --- changelog.d/5864.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5864.misc diff --git a/changelog.d/5864.misc b/changelog.d/5864.misc new file mode 100644 index 0000000000..40ac11db64 --- /dev/null +++ b/changelog.d/5864.misc @@ -0,0 +1 @@ +Correctly retry all hosts returned from SRV when we fail to connect. -- cgit 1.4.1 From 861d663c15a8103f5599f0bdda7d1d3ae764fd8f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 16 Aug 2019 13:15:26 +0100 Subject: Fixup changelog and remove debug logging --- changelog.d/5850.feature | 1 + changelog.d/5850.misc | 1 - synapse/http/federation/well_known_resolver.py | 5 +---- 3 files changed, 2 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5850.feature delete mode 100644 changelog.d/5850.misc diff --git a/changelog.d/5850.feature b/changelog.d/5850.feature new file mode 100644 index 0000000000..b565929a54 --- /dev/null +++ b/changelog.d/5850.feature @@ -0,0 +1 @@ +Add retry to well-known lookups if we have recently seen a valid well-known record for the server. diff --git a/changelog.d/5850.misc b/changelog.d/5850.misc deleted file mode 100644 index c4f879ca2f..0000000000 --- a/changelog.d/5850.misc +++ /dev/null @@ -1 +0,0 @@ -Retry well-known lookups if we have recently seen a valid well-known record for the server. diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index d59864e298..c846003886 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -163,9 +163,7 @@ class WellKnownResolver(object): Deferred[Tuple[bytes,int]]: The lookup result and cache period. """ - had_valid_well_known = bool( - self._had_valid_well_known_cache.get(server_name, False) - ) + had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False) # We do this in two steps to differentiate between possibly transient # errors (e.g. 
can't connect to host, 503 response) and more permanent @@ -247,7 +245,6 @@ class WellKnownResolver(object): # Bail if we've been cancelled raise except Exception as e: - logger.info("Retry: %s", retry) if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS: logger.info("Error fetching %s: %s", uri_str, e) raise _FetchWellKnownFailure(temporary=True) -- cgit 1.4.1 From bb29bc29374d10d151ebff13c4e95e07c0ef3a29 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 20 Aug 2019 08:49:31 +0100 Subject: Use MSC2197 on stable prefix as it has almost finished FCP Signed-off-by: Olivier Wilkinson (reivilibre) --- synapse/federation/transport/client.py | 4 ++-- synapse/federation/transport/server.py | 26 ++------------------------ 2 files changed, 4 insertions(+), 26 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2e99f77eb1..482a101c09 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -328,8 +328,8 @@ class TransportLayerClient(object): third_party_instance_id=None, ): if search_filter: - # TODO(MSC2197): Move to V1 prefix - path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/publicRooms") + # this uses MSC2197 (Search Filtering over Federation) + path = _create_v1_path("/publicRooms") data = {"include_all_networks": "true" if include_all_networks else "false"} if third_party_instance_id: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index e17555c4cf..027b33f67e 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -750,30 +750,8 @@ class PublicRoomList(BaseFederationServlet): ) return 200, data - -class UnstablePublicRoomList(BaseFederationServlet): - """ - Fetch the public room list for this server. - - This API returns information in the same format as /publicRooms on the - client API, but will only ever include local public rooms and hence is - intended for consumption by other home servers. - - This is the unstable-prefixed version which adds support for MSC2197, which - is still undergoing review. 
- """ - - PATH = "/publicRooms" - PREFIX = FEDERATION_UNSTABLE_PREFIX - - def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access): - super(UnstablePublicRoomList, self).__init__( - handler, authenticator, ratelimiter, server_name - ) - self.allow_access = allow_access - - # TODO(MSC2197): Move away from Unstable prefix and back to normal prefix async def on_POST(self, origin, content, query): + # This implements MSC2197 (Search Filtering over Federation) if not self.allow_access: raise FederationDeniedError(origin) @@ -1373,7 +1351,7 @@ FEDERATION_SERVLET_CLASSES = ( OPENID_SERVLET_CLASSES = (OpenIdUserInfo,) -ROOM_LIST_CLASSES = (PublicRoomList, UnstablePublicRoomList) +ROOM_LIST_CLASSES = (PublicRoomList,) GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsProfileServlet, -- cgit 1.4.1 From 502728777c00e3242f7436c18b8d918b6613d377 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 20 Aug 2019 08:49:53 +0100 Subject: Newsfile on one line Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/5859.feature | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/changelog.d/5859.feature b/changelog.d/5859.feature index c897c66037..52df7fc81b 100644 --- a/changelog.d/5859.feature +++ b/changelog.d/5859.feature @@ -1,2 +1 @@ -Add unstable support for MSC2197 (filtered search requests over federation), in -order to allow upcoming room directory query performance improvements. +Add unstable support for MSC2197 (filtered search requests over federation), in order to allow upcoming room directory query performance improvements. -- cgit 1.4.1 From 1dec31560e5712306e368a0adc6d9f84f924bdc9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2019 11:46:00 +0100 Subject: Change jitter to be a factor rather than absolute value --- synapse/http/federation/well_known_resolver.py | 23 +++++++++++----------- .../federation/test_matrix_federation_agent.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index c846003886..5e9b0befb0 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -32,8 +32,8 @@ from synapse.util.metrics import Measure # period to cache .well-known results for by default WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600 -# jitter to add to the .well-known default cache ttl -WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60 +# jitter factor to add to the .well-known default cache ttls +WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1 # period to cache failure to fetch .well-known for WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 @@ -133,16 +133,14 @@ class WellKnownResolver(object): # We have recently seen a valid well-known record for this # server, so we cache the lack of well-known for a shorter time. 
cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD - cache_period += random.uniform( - 0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER - ) else: - # add some randomness to the TTL to avoid a stampeding herd every hour - # after startup cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD - cache_period += random.uniform( - 0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER - ) + + # add some randomness to the TTL to avoid a stampeding herd + cache_period *= random.uniform( + 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + ) if cache_period > 0: self._well_known_cache.set(server_name, result, cache_period) @@ -194,7 +192,10 @@ class WellKnownResolver(object): cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD # add some randomness to the TTL to avoid a stampeding herd every 24 hours # after startup - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + cache_period *= random.uniform( + 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER, + ) else: cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 4d3f31d18c..c55aad8e11 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -551,7 +551,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(self.well_known_cache[b"testserv"], b"target-server") # check the cache expires - self.reactor.pump((25 * 3600,)) + self.reactor.pump((48 * 3600,)) self.well_known_cache.expire() self.assertNotIn(b"testserv", self.well_known_cache) @@ -639,7 +639,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(self.well_known_cache[b"testserv"], b"target-server") # check the cache expires - self.reactor.pump((25 * 3600,)) + self.reactor.pump((48 * 3600,)) self.well_known_cache.expire() self.assertNotIn(b"testserv", self.well_known_cache) -- cgit 1.4.1 From 7777d353bfffc840b79391da107e593338a1a2fe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2019 11:46:54 +0100 Subject: Remove test debugs --- tests/federation/test_federation_server.py | 1 - tests/http/federation/test_matrix_federation_agent.py | 1 - tests/test_visibility.py | 1 - 3 files changed, 3 deletions(-) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index af15f4cc5a..b08be451aa 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -20,7 +20,6 @@ from synapse.federation.federation_server import server_matches_acl_event from tests import unittest -@unittest.DEBUG class ServerACLsTestCase(unittest.TestCase): def test_blacklisted_server(self): e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]}) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index f97c8a59f6..445a0e76ab 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -67,7 +67,6 @@ def get_connection_factory(): return test_server_connection_factory -@unittest.DEBUG class MatrixFederationAgentTests(unittest.TestCase): def setUp(self): self.reactor = ThreadedMemoryReactorClock() diff --git a/tests/test_visibility.py b/tests/test_visibility.py index e0605dac2f..18f1a0035d 100644 --- a/tests/test_visibility.py +++ 
b/tests/test_visibility.py index e0605dac2f..18f1a0035d 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -74,7 +74,6 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id) self.assertEqual(filtered[i].content["a"], "b") - @tests.unittest.DEBUG @defer.inlineCallbacks def test_erased_user(self): # 4 message events, from erased and unerased users, with a membership -- cgit 1.4.1 From 1f9df1cc7ba7027aef3a38d01909a928ecf2a8c5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2019 11:49:44 +0100 Subject: Fixup _sort_server_list to be slightly more efficient Also document that we are using the algorithm described in RFC2782 and ensure we handle zero weight correctly. --- synapse/http/federation/srv_resolver.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index bbda0a23f4..110b112e85 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -94,10 +94,18 @@ def _sort_server_list(server_list): results = [] for priority in sorted(priority_map): - servers = priority_map.pop(priority) + servers = priority_map[priority] + # This follows the algorithm described in RFC2782. + # + # N.B. Weights can be zero, which means that you should pick that server + # last *or* that it's the only server in this priority. + + # We sort to ensure zero-weighted items are first. + servers.sort(key=lambda s: s.weight) + + total_weight = sum(s.weight for s in servers) while servers: - total_weight = sum(s.weight for s in servers) target_weight = random.randint(0, total_weight) for s in servers: @@ -108,6 +116,7 @@ def _sort_server_list(server_list): results.append(s) servers.remove(s) + total_weight -= s.weight return results -- cgit 1.4.1 From 74f016d343fe270ab3affe79cc82266d94120e5c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2019 11:50:12 +0100 Subject: Remove now unused pick_server_from_list --- synapse/http/federation/srv_resolver.py | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index 110b112e85..c8ca3fd0e9 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -53,36 +53,6 @@ class Server(object): expires = attr.ib(default=0) -def pick_server_from_list(server_list): - """Randomly choose a server from the server list - - Args: - server_list (list[Server]): list of candidate servers - - Returns: - Tuple[bytes, int]: (host, port) pair for the chosen server - """ - if not server_list: - raise RuntimeError("pick_server_from_list called with empty list") - - # TODO: currently we only use the lowest-priority servers. We should maintain a - # cache of servers known to be "down" and filter them out - - min_priority = min(s.priority for s in server_list) - eligible_servers = list(s for s in server_list if s.priority == min_priority) - total_weight = sum(s.weight for s in eligible_servers) - target_weight = random.randint(0, total_weight) - - for s in eligible_servers: - target_weight -= s.weight - - if target_weight <= 0: - return s.host, s.port - - # this should be impossible. - raise RuntimeError("pick_server_from_list got to end of eligible server list.") - - def _sort_server_list(server_list): """Given a list of SRV records sort them into priority order and shuffle each priority with the given weight. 
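With pick_server_from_list gone, _sort_server_list is the single source of SRV ordering. The standalone sketch below is an illustrative restatement of the final version for experimentation, not part of the patch: Server is a cut-down stand-in for the attrs record class in srv_resolver.py, and the selection loop deliberately mirrors the patched code, including relying on the loop variable s after the inner for loop.

    import random

    import attr

    @attr.s(slots=True, frozen=True)
    class Server(object):
        # Cut-down stand-in for the record class in srv_resolver.py.
        host = attr.ib()
        port = attr.ib()
        priority = attr.ib(default=0)
        weight = attr.ib(default=0)

    def sort_server_list(server_list):
        # Group by priority, then weighted-shuffle each priority band as
        # described in RFC 2782.
        priority_map = {}
        for server in server_list:
            priority_map.setdefault(server.priority, []).append(server)

        results = []
        for priority in sorted(priority_map):
            servers = priority_map[priority]
            # Zero-weight entries sort first (per the RFC), so they are only
            # picked ahead of weighted entries when the random target is 0.
            servers.sort(key=lambda s: s.weight)
            total_weight = sum(s.weight for s in servers)
            while servers:
                target_weight = random.randint(0, total_weight)
                for s in servers:
                    target_weight -= s.weight
                    if target_weight <= 0:
                        break
                results.append(s)
                servers.remove(s)
                total_weight -= s.weight
        return results

    # Example: the weight-5 entry will usually, but not always, come first.
    # sort_server_list([Server(b"a", 8448, 0, 5), Server(b"b", 8448, 0, 1)])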
-- cgit 1.4.1 From 29763f01c63f1c5d5053dad413b69f1980208131 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2019 12:38:06 +0100 Subject: Make changelog entry be a feature --- changelog.d/5864.feature | 1 + changelog.d/5864.misc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/5864.feature delete mode 100644 changelog.d/5864.misc diff --git a/changelog.d/5864.feature b/changelog.d/5864.feature new file mode 100644 index 0000000000..40ac11db64 --- /dev/null +++ b/changelog.d/5864.feature @@ -0,0 +1 @@ +Correctly retry all hosts returned from SRV when we fail to connect. diff --git a/changelog.d/5864.misc b/changelog.d/5864.misc deleted file mode 100644 index 40ac11db64..0000000000 --- a/changelog.d/5864.misc +++ /dev/null @@ -1 +0,0 @@ -Correctly retry all hosts returned from SRV when we fail to connect. -- cgit 1.4.1 From 5906be858900e134d99dd94f0ca9e8bd1db14c05 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Aug 2019 15:27:08 +0100 Subject: Add config option for keys to use to sign keys This allows servers to separate keys that are used to sign remote keys when acting as a notary server. --- docs/sample_config.yaml | 8 ++++++++ synapse/config/key.py | 35 +++++++++++++++++++++++++++++++---- synapse/crypto/keyring.py | 12 +++++++----- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 0c6be30e51..c96eb0cf2d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1027,6 +1027,14 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # #trusted_key_servers: # - server_name: "matrix.org" +# + +# The additional signing keys to use when acting as a trusted key server, on +# top of the normal signing keys. +# +# Can contain multiple keys, one per line. +# +#key_server_signing_keys_path: "key_server_signing_keys.key" # Enable SAML2 for registration and login. Uses pysaml2. diff --git a/synapse/config/key.py b/synapse/config/key.py index fe8386985c..f1a1efcb7f 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -76,7 +76,7 @@ class KeyConfig(Config): config_dir_path, config["server_name"] + ".signing.key" ) - self.signing_key = self.read_signing_key(signing_key_path) + self.signing_key = self.read_signing_keys(signing_key_path, "signing_key") self.old_signing_keys = self.read_old_signing_keys( config.get("old_signing_keys", {}) @@ -85,6 +85,15 @@ class KeyConfig(Config): config.get("key_refresh_interval", "1d") ) + self.key_server_signing_keys = list(self.signing_key) + key_server_signing_keys_path = config.get("key_server_signing_keys_path") + if key_server_signing_keys_path: + self.key_server_signing_keys.extend( + self.read_signing_keys( + key_server_signing_keys_path, "key_server_signing_keys_path" + ) + ) + # if neither trusted_key_servers nor perspectives are given, use the default. if "perspectives" not in config and "trusted_key_servers" not in config: key_servers = [{"server_name": "matrix.org"}] @@ -210,16 +219,34 @@ class KeyConfig(Config): # #trusted_key_servers: # - server_name: "matrix.org" + # + + # The additional signing keys to use when acting as a trusted key server, on + # top of the normal signing keys. + # + # Can contain multiple keys, one per line. 
+ # + #key_server_signing_keys_path: "key_server_signing_keys.key" """ % locals() ) - def read_signing_key(self, signing_key_path): - signing_keys = self.read_file(signing_key_path, "signing_key") + def read_signing_keys(self, signing_key_path, name): + """Read the signing keys in the given path. + + Args: + signing_key_path (str) + name (str): Associated config key name + + Returns: + list[SigningKey] + """ + + signing_keys = self.read_file(signing_key_path, name) try: return read_signing_keys(signing_keys.splitlines(True)) except Exception as e: - raise ConfigError("Error reading signing_key: %s" % (str(e))) + raise ConfigError("Error reading %s: %s" % (name, str(e))) def read_old_signing_keys(self, old_signing_keys): keys = {} diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6c3e885e72..a3b55e349e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -540,11 +540,13 @@ class BaseV2KeyFetcher(object): verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) - # re-sign the json with our own key, so that it is ready if we are asked to - # give it out as a notary server - signed_key_json = sign_json( - response_json, self.config.server_name, self.config.signing_key[0] - ) + # re-sign the json with our own keys, so that it is ready if we are + # asked to give it out as a notary server + signed_key_json = response_json + for signing_key in self.config.key_server_signing_keys: + signed_key_json = sign_json( + signed_key_json, self.config.server_name, signing_key + ) signed_key_json_bytes = encode_canonical_json(signed_key_json) -- cgit 1.4.1 From 97cbc96093dcd878bc823f34d71437a08786a3e4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 21 Aug 2019 10:39:45 +0100 Subject: Only sign when we respond to remote key requests --- synapse/crypto/keyring.py | 11 +---------- synapse/rest/key/v2/remote_key_resource.py | 28 +++++++++++++++------------- 2 files changed, 16 insertions(+), 23 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index a3b55e349e..abeb0ac26e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -30,7 +30,6 @@ from signedjson.key import ( from signedjson.sign import ( SignatureVerifyException, encode_canonical_json, - sign_json, signature_ids, verify_signed_json, ) @@ -540,15 +539,7 @@ class BaseV2KeyFetcher(object): verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) - # re-sign the json with our own keys, so that it is ready if we are - # asked to give it out as a notary server - signed_key_json = response_json - for signing_key in self.config.key_server_signing_keys: - signed_key_json = sign_json( - signed_key_json, self.config.server_name, signing_key - ) - - signed_key_json_bytes = encode_canonical_json(signed_key_json) + signed_key_json_bytes = encode_canonical_json(response_json) yield make_deferred_yieldable( defer.gatherResults( diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 031a316693..f3398c9523 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -13,7 +13,9 @@ # limitations under the License. 
import logging -from io import BytesIO + +from canonicaljson import json +from signedjson.sign import sign_json from twisted.internet import defer @@ -95,6 +97,7 @@ class RemoteKey(DirectServeResource): self.store = hs.get_datastore() self.clock = hs.get_clock() self.federation_domain_whitelist = hs.config.federation_domain_whitelist + self.config = hs.config @wrap_json_request_handler async def _async_render_GET(self, request): @@ -214,15 +217,14 @@ class RemoteKey(DirectServeResource): yield self.fetcher.get_keys(cache_misses) yield self.query_keys(request, query, query_remote_on_cache_miss=False) else: - result_io = BytesIO() - result_io.write(b'{"server_keys":') - sep = b"[" - for json_bytes in json_results: - result_io.write(sep) - result_io.write(json_bytes) - sep = b"," - if sep == b"[": - result_io.write(sep) - result_io.write(b"]}") - - respond_with_json_bytes(request, 200, result_io.getvalue()) + signed_keys = [] + for key_json in json_results: + key_json = json.loads(key_json) + for signing_key in self.config.key_server_signing_keys: + key_json = sign_json(key_json, self.config.server_name, signing_key) + + signed_keys.append(key_json) + + results = {"server_keys": signed_keys} + + respond_with_json_bytes(request, 200, json.dumps(results).encode("utf-8")) -- cgit 1.4.1 From 62fb643cdca80568a404c46a255384cd73b6e16b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 21 Aug 2019 10:41:29 +0100 Subject: Newsfile --- changelog.d/5895.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5895.feature diff --git a/changelog.d/5895.feature b/changelog.d/5895.feature new file mode 100644 index 0000000000..c394a3772c --- /dev/null +++ b/changelog.d/5895.feature @@ -0,0 +1 @@ +Add config option to sign remote key query responses with a separate key. -- cgit 1.4.1 From ef1c524bb381545761fdd1ad2a61db1693ddbd3d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 22 Aug 2019 10:42:06 +0100 Subject: Improve error msg when key-fetch fails (#5896) There's no point doing a raise_from here, because the exception is always logged at warn with no stacktrace in the caller. Instead, let's try to give better messages to reduce confusion. In particular, this means that we won't log 'Failed to connect to remote server' when we don't even attempt to connect to the remote server due to blacklisting. --- changelog.d/5896.misc | 1 + synapse/crypto/keyring.py | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5896.misc diff --git a/changelog.d/5896.misc b/changelog.d/5896.misc new file mode 100644 index 0000000000..ed47c747bd --- /dev/null +++ b/changelog.d/5896.misc @@ -0,0 +1 @@ +Improve the logging when we have an error when fetching signing keys. 
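(Editor's note: for context on the sign_json loops touched by the two signing commits above, this is roughly how the signedjson library behaves in isolation. A minimal sketch with made-up server names; `generate_signing_key` and `sign_json` are real signedjson APIs, everything else is illustrative.)

```python
from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

# Two signing keys, standing in for a normal signing key plus a dedicated
# notary key of the kind key_server_signing_keys_path now allows.
signing_keys = [generate_signing_key("a_key"), generate_signing_key("notary_key")]

key_json = {"server_name": "remote.example.com", "verify_keys": {}}
for signing_key in signing_keys:
    # sign_json adds a signature under the given server name, keyed by
    # "ed25519:<version>", and returns the updated object.
    key_json = sign_json(key_json, "notary.example.com", signing_key)

# key_json["signatures"]["notary.example.com"] now holds both signatures.
```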
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6c3e885e72..654accc843 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -18,7 +18,6 @@ import logging from collections import defaultdict import six -from six import raise_from from six.moves import urllib import attr @@ -657,9 +656,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): }, ) except (NotRetryingDestination, RequestSendFailed) as e: - raise_from(KeyLookupError("Failed to connect to remote server"), e) + # these both have str() representations which we can't really improve upon + raise KeyLookupError(str(e)) except HttpResponseException as e: - raise_from(KeyLookupError("Remote server returned an error"), e) + raise KeyLookupError("Remote server returned an error: %s" % (e,)) keys = {} added_keys = [] @@ -821,9 +821,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher): timeout=10000, ) except (NotRetryingDestination, RequestSendFailed) as e: - raise_from(KeyLookupError("Failed to connect to remote server"), e) + # these both have str() representations which we can't really improve + # upon + raise KeyLookupError(str(e)) except HttpResponseException as e: - raise_from(KeyLookupError("Remote server returned an error"), e) + raise KeyLookupError("Remote server returned an error: %s" % (e,)) if response["server_name"] != server_name: raise KeyLookupError( -- cgit 1.4.1 From 119aa31b105705390e87f87186f787b32e04ba21 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 22 Aug 2019 10:42:59 +0100 Subject: Servlet to purge old rooms (#5845) --- changelog.d/5845.feature | 1 + docs/admin_api/purge_room.md | 18 ++++ synapse/handlers/pagination.py | 17 ++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/purge_room_servlet.py | 57 +++++++++++++ synapse/storage/events.py | 137 +++++++++++++++++++++++++++++++ 6 files changed, 232 insertions(+) create mode 100644 changelog.d/5845.feature create mode 100644 docs/admin_api/purge_room.md create mode 100644 synapse/rest/admin/purge_room_servlet.py diff --git a/changelog.d/5845.feature b/changelog.d/5845.feature new file mode 100644 index 0000000000..7b0dc9a95e --- /dev/null +++ b/changelog.d/5845.feature @@ -0,0 +1 @@ +Add an admin API to purge old rooms from the database. diff --git a/docs/admin_api/purge_room.md b/docs/admin_api/purge_room.md new file mode 100644 index 0000000000..64ea7b6a64 --- /dev/null +++ b/docs/admin_api/purge_room.md @@ -0,0 +1,18 @@ +Purge room API +============== + +This API will remove all trace of a room from your database. + +All local users must have left the room before it can be removed. + +The API is: + +``` +POST /_synapse/admin/v1/purge_room + +{ + "room_id": "!room:id" +} +``` + +You must authenticate using the access token of an admin user. 
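(Editor's note: a usage sketch for the new admin API. The base URL, access token and room ID are placeholders; adjust them for your deployment.)

```python
import requests

resp = requests.post(
    "http://localhost:8008/_synapse/admin/v1/purge_room",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"room_id": "!room:id"},
)
# A 400 error is returned if any local users are still joined to the room.
resp.raise_for_status()
```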
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index d83aab3f74..5744f4579d 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -70,6 +70,7 @@ class PaginationHandler(object): self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() + self._server_name = hs.hostname self.pagination_lock = ReadWriteLock() self._purges_in_progress_by_room = set() @@ -153,6 +154,22 @@ class PaginationHandler(object): """ return self._purges_by_id.get(purge_id) + async def purge_room(self, room_id): + """Purge the given room from the database""" + with (await self.pagination_lock.write(room_id)): + # check we know about the room + await self.store.get_room_version(room_id) + + # first check that we have no users in this room + joined = await defer.maybeDeferred( + self.store.is_host_joined, room_id, self._server_name + ) + + if joined: + raise SynapseError(400, "Users are still joined to this room") + + await self.store.purge_room(room_id) + @defer.inlineCallbacks def get_messages( self, diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 5720cab425..0dce256840 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -42,6 +42,7 @@ from synapse.rest.admin._base import ( historical_admin_path_patterns, ) from synapse.rest.admin.media import register_servlets_for_media_repo +from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string @@ -738,6 +739,7 @@ def register_servlets(hs, http_server): Register all the admin servlets. """ register_servlets_for_client_rest_resource(hs, http_server) + PurgeRoomServlet(hs).register(http_server) SendServerNoticeServlet(hs).register(http_server) VersionServlet(hs).register(http_server) diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py new file mode 100644 index 0000000000..2922eb543e --- /dev/null +++ b/synapse/rest/admin/purge_room_servlet.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import re + +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) +from synapse.rest.admin import assert_requester_is_admin + + +class PurgeRoomServlet(RestServlet): + """Servlet which will remove all trace of a room from the database + + POST /_synapse/admin/v1/purge_room + { + "room_id": "!room:id" + } + + returns: + + {} + """ + + PATTERNS = (re.compile("^/_synapse/admin/v1/purge_room$"),) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + self.hs = hs + self.auth = hs.get_auth() + self.pagination_handler = hs.get_pagination_handler() + + async def on_POST(self, request): + await assert_requester_is_admin(self.auth, request) + + body = parse_json_object_from_request(request) + assert_params_in_dict(body, ("room_id",)) + + await self.pagination_handler.purge_room(body["room_id"]) + + return (200, {}) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6fcfa4d789..5a95c36a8b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2181,6 +2181,143 @@ class EventsStore( return to_delete, to_dedelta + def purge_room(self, room_id): + """Deletes all record of a room + + Args: + room_id (str): + """ + + return self.runInteraction("purge_room", self._purge_room_txn, room_id) + + def _purge_room_txn(self, txn, room_id): + # first we have to delete the state groups states + logger.info("[purge] removing %s from state_groups_state", room_id) + + txn.execute( + """ + DELETE FROM state_groups_state WHERE state_group IN ( + SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) + WHERE events.room_id=? + ) + """, + (room_id,), + ) + + # ... and the state group edges + logger.info("[purge] removing %s from state_group_edges", room_id) + + txn.execute( + """ + DELETE FROM state_group_edges WHERE state_group IN ( + SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) + WHERE events.room_id=? + ) + """, + (room_id,), + ) + + # ... and the state groups + logger.info("[purge] removing %s from state_groups", room_id) + + txn.execute( + """ + DELETE FROM state_groups WHERE id IN ( + SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) + WHERE events.room_id=? + ) + """, + (room_id,), + ) + + # and then tables which lack an index on room_id but have one on event_id + for table in ( + "event_auth", + "event_edges", + "event_push_actions_staging", + "event_reference_hashes", + "event_relations", + "event_to_state_groups", + "redactions", + "rejections", + "state_events", + ): + logger.info("[purge] removing %s from %s", room_id, table) + + txn.execute( + """ + DELETE FROM %s WHERE event_id IN ( + SELECT event_id FROM events WHERE room_id=? 
+ ) + """ + % (table,), + (room_id,), + ) + + # and finally, the tables with an index on room_id (or no useful index) + for table in ( + "current_state_events", + "event_backward_extremities", + "event_forward_extremities", + "event_json", + "event_push_actions", + "event_search", + "events", + "group_rooms", + "public_room_list_stream", + "receipts_graph", + "receipts_linearized", + "room_aliases", + "room_depth", + "room_memberships", + "room_state", + "room_stats", + "room_stats_earliest_token", + "rooms", + "stream_ordering_to_exterm", + "topics", + "users_in_public_rooms", + "users_who_share_private_rooms", + # no useful index, but let's clear them anyway + "appservice_room_list", + "e2e_room_keys", + "event_push_summary", + "pusher_throttle", + "group_summary_rooms", + "local_invites", + "room_account_data", + "room_tags", + ): + logger.info("[purge] removing %s from %s", room_id, table) + txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,)) + + # Other tables we do NOT need to clear out: + # + # - blocked_rooms + # This is important, to make sure that we don't accidentally rejoin a blocked + # room after it was purged + # + # - user_directory + # This has a room_id column, but it is unused + # + + # Other tables that we might want to consider clearing out include: + # + # - event_reports + # Given that these are intended for abuse management my initial + # inclination is to leave them in place. + # + # - current_state_delta_stream + # - ex_outlier_stream + # - room_tags_revisions + # The problem with these is that they are largeish and there is no room_id + # index on them. In any case we should be clearing out 'stream' tables + # periodically anyway (#5888) + + # TODO: we could probably usefully do a bunch of cache invalidation here + + logger.info("[purge] done") + @defer.inlineCallbacks def is_event_after(self, event_id1, event_id2): """Returns True if event_id1 is after event_id2 in the stream -- cgit 1.4.1 From c9f11d09fc85470cf9a36909104734a3682c4b39 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 22 Aug 2019 10:43:13 +0100 Subject: Add missing index on users_in_public_rooms. (#5894) --- changelog.d/5894.misc | 1 + .../schema/delta/56/users_in_public_rooms_idx.sql | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/5894.misc create mode 100644 synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql diff --git a/changelog.d/5894.misc b/changelog.d/5894.misc new file mode 100644 index 0000000000..fca4485ff7 --- /dev/null +++ b/changelog.d/5894.misc @@ -0,0 +1 @@ +Add missing index on users_in_public_rooms to improve the performance of directory queries. diff --git a/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql new file mode 100644 index 0000000000..149f8be8b6 --- /dev/null +++ b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql @@ -0,0 +1,17 @@ +/* Copyright 2019 Matrix.org Foundation CIC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- this was apparently forgotten when the table was created back in delta 53. +CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms(room_id); -- cgit 1.4.1 From 9a6f2be5724bb0ed53a4b04e7fbb7ccee39050bd Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Thu, 22 Aug 2019 11:28:12 +0100 Subject: Opentrace e2e keys (#5855) Add opentracing tags and logs for e2e keys --- changelog.d/5855.misc | 1 + synapse/federation/federation_server.py | 3 ++ synapse/handlers/e2e_keys.py | 52 ++++++++++++++++++++++++++++++++- synapse/handlers/e2e_room_keys.py | 28 ++++++++++++++++-- synapse/rest/client/v2_alpha/keys.py | 13 ++++++++- synapse/storage/e2e_room_keys.py | 14 +++++++++ synapse/storage/end_to_end_keys.py | 38 ++++++++++++++++++++++-- 7 files changed, 142 insertions(+), 7 deletions(-) create mode 100644 changelog.d/5855.misc diff --git a/changelog.d/5855.misc b/changelog.d/5855.misc new file mode 100644 index 0000000000..32db7fbe37 --- /dev/null +++ b/changelog.d/5855.misc @@ -0,0 +1 @@ +Opentracing for room and e2e keys. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d216c46dfe..9286ca3202 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -43,6 +43,7 @@ from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name from synapse.logging.context import nested_logging_context +from synapse.logging.opentracing import log_kv, trace from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, @@ -507,6 +508,7 @@ class FederationServer(FederationBase): def on_query_user_devices(self, origin, user_id): return self.on_query_request("user_devices", user_id) + @trace @defer.inlineCallbacks @log_function def on_claim_client_keys(self, origin, content): @@ -515,6 +517,7 @@ class FederationServer(FederationBase): for device_id, algorithm in device_keys.items(): query.append((user_id, device_id, algorithm)) + log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = yield self.store.claim_e2e_one_time_keys(query) json_result = {} diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 1f90b0d278..056fb97acb 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.api.errors import CodeMessageException, SynapseError from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace from synapse.types import UserID, get_domain_from_id from synapse.util import unwrapFirstError from synapse.util.retryutils import NotRetryingDestination @@ -46,6 +47,7 @@ class E2eKeysHandler(object): "client_keys", self.on_federation_query_client_keys ) + @trace @defer.inlineCallbacks def query_devices(self, query_body, timeout): """ Handle a device key query from a client @@ -81,6 +83,9 @@ class E2eKeysHandler(object): else: remote_queries[user_id] = device_ids + set_tag("local_key_query", local_query) + set_tag("remote_key_query", remote_queries) + # First get local devices. 
failures = {} results = {} @@ -121,6 +126,7 @@ class E2eKeysHandler(object): r[user_id] = remote_queries[user_id] # Now fetch any devices that we don't have in our cache + @trace @defer.inlineCallbacks def do_remote_query(destination): """This is called when we are querying the device list of a user on @@ -185,6 +191,8 @@ class E2eKeysHandler(object): except Exception as e: failure = _exception_to_failure(e) failures[destination] = failure + set_tag("error", True) + set_tag("reason", failure) yield make_deferred_yieldable( defer.gatherResults( @@ -198,6 +206,7 @@ class E2eKeysHandler(object): return {"device_keys": results, "failures": failures} + @trace @defer.inlineCallbacks def query_local_devices(self, query): """Get E2E device keys for local users @@ -210,6 +219,7 @@ class E2eKeysHandler(object): defer.Deferred: (resolves to dict[string, dict[string, dict]]): map from user_id -> device_id -> device details """ + set_tag("local_query", query) local_query = [] result_dict = {} @@ -217,6 +227,14 @@ class E2eKeysHandler(object): # we use UserID.from_string to catch invalid user ids if not self.is_mine(UserID.from_string(user_id)): logger.warning("Request for keys for non-local user %s", user_id) + log_kv( + { + "message": "Requested a local key for a user which" + " was not local to the homeserver", + "user_id": user_id, + } + ) + set_tag("error", True) raise SynapseError(400, "Not a user here") if not device_ids: @@ -241,6 +259,7 @@ class E2eKeysHandler(object): r["unsigned"]["device_display_name"] = display_name result_dict[user_id][device_id] = r + log_kv(results) return result_dict @defer.inlineCallbacks @@ -251,6 +270,7 @@ class E2eKeysHandler(object): res = yield self.query_local_devices(device_keys_query) return {"device_keys": res} + @trace @defer.inlineCallbacks def claim_one_time_keys(self, query, timeout): local_query = [] @@ -265,6 +285,9 @@ class E2eKeysHandler(object): domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = device_keys + set_tag("local_key_query", local_query) + set_tag("remote_key_query", remote_queries) + results = yield self.store.claim_e2e_one_time_keys(local_query) json_result = {} @@ -276,8 +299,10 @@ class E2eKeysHandler(object): key_id: json.loads(json_bytes) } + @trace @defer.inlineCallbacks def claim_client_keys(destination): + set_tag("destination", destination) device_keys = remote_queries[destination] try: remote_result = yield self.federation.claim_client_keys( @@ -290,6 +315,8 @@ class E2eKeysHandler(object): except Exception as e: failure = _exception_to_failure(e) failures[destination] = failure + set_tag("error", True) + set_tag("reason", failure) yield make_deferred_yieldable( defer.gatherResults( @@ -313,9 +340,11 @@ class E2eKeysHandler(object): ), ) + log_kv({"one_time_keys": json_result, "failures": failures}) return {"one_time_keys": json_result, "failures": failures} @defer.inlineCallbacks + @tag_args def upload_keys_for_user(self, user_id, device_id, keys): time_now = self.clock.time_msec() @@ -329,6 +358,13 @@ class E2eKeysHandler(object): user_id, time_now, ) + log_kv( + { + "message": "Updating device_keys for user.", + "user_id": user_id, + "device_id": device_id, + } + ) # TODO: Sign the JSON with the server key changed = yield self.store.set_e2e_device_keys( user_id, device_id, time_now, device_keys @@ -336,12 +372,24 @@ class E2eKeysHandler(object): if changed: # Only notify about device updates *if* the keys actually changed yield self.device_handler.notify_device_update(user_id, [device_id]) - 
+ else: + log_kv({"message": "Not updating device_keys for user", "user_id": user_id}) one_time_keys = keys.get("one_time_keys", None) if one_time_keys: + log_kv( + { + "message": "Updating one_time_keys for device.", + "user_id": user_id, + "device_id": device_id, + } + ) yield self._upload_one_time_keys_for_user( user_id, device_id, time_now, one_time_keys ) + else: + log_kv( + {"message": "Did not update one_time_keys", "reason": "no keys given"} + ) # the device should have been registered already, but it may have been # deleted due to a race with a DELETE request. Or we may be using an @@ -352,6 +400,7 @@ class E2eKeysHandler(object): result = yield self.store.count_e2e_one_time_keys(user_id, device_id) + set_tag("one_time_key_counts", result) return {"one_time_key_counts": result} @defer.inlineCallbacks @@ -395,6 +444,7 @@ class E2eKeysHandler(object): (algorithm, key_id, encode_canonical_json(key).decode("ascii")) ) + log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys}) yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 41b871fc59..a9d80f708c 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( StoreError, SynapseError, ) +from synapse.logging.opentracing import log_kv, trace from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) @@ -49,6 +50,7 @@ class E2eRoomKeysHandler(object): # changed. self._upload_linearizer = Linearizer("upload_room_keys_lock") + @trace @defer.inlineCallbacks def get_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk get the E2E room keys for a given backup, optionally filtered to a given @@ -84,8 +86,10 @@ class E2eRoomKeysHandler(object): user_id, version, room_id, session_id ) + log_kv(results) return results + @trace @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk delete the E2E room keys for a given backup, optionally filtered to a given @@ -107,6 +111,7 @@ class E2eRoomKeysHandler(object): with (yield self._upload_linearizer.queue(user_id)): yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + @trace @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): """Bulk upload a list of room keys into a given backup version, asserting @@ -186,7 +191,14 @@ class E2eRoomKeysHandler(object): session_id(str): the session whose room_key we're setting room_key(dict): the room_key being set """ - + log_kv( + { + "message": "Trying to upload room key", + "room_id": room_id, + "session_id": session_id, + "user_id": user_id, + } + ) # get the room_key for this particular row current_room_key = None try: @@ -195,14 +207,23 @@ class E2eRoomKeysHandler(object): ) except StoreError as e: if e.code == 404: - pass + log_kv( + { + "message": "Room key not found.", + "room_id": room_id, + "user_id": user_id, + } + ) else: raise if self._should_replace_room_key(current_room_key, room_key): + log_kv({"message": "Replacing room key."}) yield self.store.set_e2e_room_key( user_id, version, room_id, session_id, room_key ) + else: + log_kv({"message": "Not replacing room_key."}) @staticmethod def _should_replace_room_key(current_room_key, room_key): @@ -236,6 +257,7 @@ class E2eRoomKeysHandler(object): return False return True + @trace @defer.inlineCallbacks def create_version(self, user_id, version_info): 
"""Create a new backup version. This automatically becomes the new @@ -294,6 +316,7 @@ class E2eRoomKeysHandler(object): raise return res + @trace @defer.inlineCallbacks def delete_version(self, user_id, version=None): """Deletes a given version of the user's e2e_room_keys backup @@ -314,6 +337,7 @@ class E2eRoomKeysHandler(object): else: raise + @trace @defer.inlineCallbacks def update_version(self, user_id, version, version_info): """Update the info about a given version of the user's backup diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 6008adec7c..b218a3f334 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -24,6 +24,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.logging.opentracing import log_kv, set_tag, trace_using_operation_name from synapse.types import StreamToken from ._base import client_patterns @@ -68,6 +69,7 @@ class KeyUploadServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() + @trace_using_operation_name("upload_keys") @defer.inlineCallbacks def on_POST(self, request, device_id): requester = yield self.auth.get_user_by_req(request, allow_guest=True) @@ -78,6 +80,14 @@ class KeyUploadServlet(RestServlet): # passing the device_id here is deprecated; however, we allow it # for now for compatibility with older clients. if requester.device_id is not None and device_id != requester.device_id: + set_tag("error", True) + log_kv( + { + "message": "Client uploading keys for a different device", + "logged_in_id": requester.device_id, + "key_being_uploaded": device_id, + } + ) logger.warning( "Client uploading keys for a different device " "(logged in as %s, uploading for %s)", @@ -178,10 +188,11 @@ class KeyChangesServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) from_token_string = parse_string(request, "from") + set_tag("from", from_token_string) # We want to enforce they do pass us one, but we ignore it and return # changes after the "to" as well as before. 
- parse_string(request, "to") + set_tag("to", parse_string(request, "to")) from_token = StreamToken.from_string(from_token_string) diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index b1901404af..be2fe2bab6 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -18,6 +18,7 @@ import json from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.opentracing import log_kv, trace from ._base import SQLBaseStore @@ -94,7 +95,16 @@ class EndToEndRoomKeyStore(SQLBaseStore): }, lock=False, ) + log_kv( + { + "message": "Set room key", + "room_id": room_id, + "session_id": session_id, + "room_key": room_key, + } + ) + @trace @defer.inlineCallbacks def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk get the E2E room keys for a given backup, optionally filtered to a given @@ -153,6 +163,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): return sessions + @trace @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): """Bulk delete the E2E room keys for a given backup, optionally filtered to a given @@ -236,6 +247,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn ) + @trace def create_e2e_room_keys_version(self, user_id, info): """Atomically creates a new version of this user's e2e_room_keys store with the given version info. @@ -276,6 +288,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn ) + @trace def update_e2e_room_keys_version(self, user_id, version, info): """Update a given backup version @@ -292,6 +305,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): desc="update_e2e_room_keys_version", ) + @trace def delete_e2e_room_keys_version(self, user_id, version=None): """Delete a given backup version of the user's room keys. Doesn't delete their actual key data. diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 1e07474e70..33e3a84933 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -18,12 +18,14 @@ from canonicaljson import encode_canonical_json from twisted.internet import defer +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.util.caches.descriptors import cached from ._base import SQLBaseStore, db_to_json class EndToEndKeyWorkerStore(SQLBaseStore): + @trace @defer.inlineCallbacks def get_e2e_device_keys( self, query_list, include_all_devices=False, include_deleted_devices=False @@ -40,6 +42,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): Dict mapping from user-id to dict mapping from device_id to dict containing "key_json", "device_display_name". 
""" + set_tag("query_list", query_list) if not query_list: return {} @@ -57,9 +60,13 @@ class EndToEndKeyWorkerStore(SQLBaseStore): return results + @trace def _get_e2e_device_keys_txn( self, txn, query_list, include_all_devices=False, include_deleted_devices=False ): + set_tag("include_all_devices", include_all_devices) + set_tag("include_deleted_devices", include_deleted_devices) + query_clauses = [] query_params = [] @@ -104,6 +111,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): for user_id, device_id in deleted_devices: result.setdefault(user_id, {})[device_id] = None + log_kv(result) return result @defer.inlineCallbacks @@ -129,8 +137,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore): keyvalues={"user_id": user_id, "device_id": device_id}, desc="add_e2e_one_time_keys_check", ) - - return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} + result = {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} + log_kv({"message": "Fetched one time keys for user", "one_time_keys": result}) + return result @defer.inlineCallbacks def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys): @@ -146,6 +155,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore): """ def _add_e2e_one_time_keys(txn): + set_tag("user_id", user_id) + set_tag("device_id", device_id) + set_tag("new_keys", new_keys) # We are protected from race between lookup and insertion due to # a unique constraint. If there is a race of two calls to # `add_e2e_one_time_keys` then they'll conflict and we will only @@ -202,6 +214,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): """ def _set_e2e_device_keys_txn(txn): + set_tag("user_id", user_id) + set_tag("device_id", device_id) + set_tag("time_now", time_now) + set_tag("device_keys", device_keys) + old_key_json = self._simple_select_one_onecol_txn( txn, table="e2e_device_keys_json", @@ -215,6 +232,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): new_key_json = encode_canonical_json(device_keys).decode("utf-8") if old_key_json == new_key_json: + log_kv({"Message": "Device key already stored."}) return False self._simple_upsert_txn( @@ -223,7 +241,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): keyvalues={"user_id": user_id, "device_id": device_id}, values={"ts_added_ms": time_now, "key_json": new_key_json}, ) - + log_kv({"message": "Device keys stored."}) return True return self.runInteraction("set_e2e_device_keys", _set_e2e_device_keys_txn) @@ -231,6 +249,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def claim_e2e_one_time_keys(self, query_list): """Take a list of one time keys out of the database""" + @trace def _claim_e2e_one_time_keys(txn): sql = ( "SELECT key_id, key_json FROM e2e_one_time_keys_json" @@ -252,7 +271,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): " AND key_id = ?" ) for user_id, device_id, algorithm, key_id in delete: + log_kv( + { + "message": "Executing claim e2e_one_time_keys transaction on database." 
+ } + ) txn.execute(sql, (user_id, device_id, algorithm, key_id)) + log_kv({"message": "finished executing and invalidating cache"}) self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) @@ -262,6 +287,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def delete_e2e_keys_by_device(self, user_id, device_id): def delete_e2e_keys_by_device_txn(txn): + log_kv( + { + "message": "Deleting keys for device", + "device_id": device_id, + "user_id": user_id, + } + ) self._simple_delete_txn( txn, table="e2e_device_keys_json", -- cgit 1.4.1 From 3320aaab3a9bba3f5872371aba7053b41af9d0a0 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Thu, 22 Aug 2019 14:17:57 +0100 Subject: Add "require_consent" parameter for registration --- synapse/handlers/register.py | 14 ++++++++++++-- synapse/replication/http/register.py | 2 ++ synapse/rest/client/v2_alpha/register.py | 5 ++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4631fab94e..5c92960d25 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -150,6 +150,7 @@ class RegistrationHandler(BaseHandler): threepid=None, user_type=None, default_display_name=None, + require_consent=True, address=None, bind_emails=[], ): @@ -167,6 +168,7 @@ class RegistrationHandler(BaseHandler): will be set to this. Defaults to 'localpart'. address (str|None): the IP address used to perform the registration. bind_emails (List[str]): list of emails to bind to this account. + require_consent (bool): Should the user be required to give consent. Returns: Deferred[str]: user_id Raises: @@ -211,6 +213,7 @@ class RegistrationHandler(BaseHandler): admin=admin, user_type=user_type, address=address, + require_consent=require_consent, ) if self.hs.config.user_directory_search_all_users: @@ -244,7 +247,7 @@ class RegistrationHandler(BaseHandler): user_id = None attempts += 1 - if not self.hs.config.user_consent_at_registration: + if not self.hs.config.user_consent_at_registration and require_consent: yield self._auto_join_rooms(user_id) else: logger.info( @@ -525,6 +528,7 @@ class RegistrationHandler(BaseHandler): ratelimit=False, ) + @defer.inlineCallbacks def register_with_store( self, user_id, @@ -536,6 +540,7 @@ class RegistrationHandler(BaseHandler): admin=False, user_type=None, address=None, + require_consent=True, ): """Register user in the datastore. @@ -553,7 +558,7 @@ class RegistrationHandler(BaseHandler): user_type (str|None): type of user. One of the values from api.constants.UserTypes, or None for a normal user. address (str|None): the IP address used to perform the registration. - + require_consent (bool): Should the user be required to give consent. Returns: Deferred """ @@ -584,8 +589,12 @@ class RegistrationHandler(BaseHandler): admin=admin, user_type=user_type, address=address, + require_consent=require_consent, ) else: + if require_consent is False: + yield self.store.user_set_consent_version(user_id, "no-consent-required") + return self.store.register_user( user_id=user_id, password_hash=password_hash, @@ -597,6 +606,7 @@ class RegistrationHandler(BaseHandler): user_type=user_type, ) + @defer.inlineCallbacks def register_device(self, user_id, device_id, initial_display_name, is_guest=False): """Register a device for a user and generate an access token. 
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 3341320a87..65702de082 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -72,6 +72,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): "admin": admin, "user_type": user_type, "address": address, + "require_consent": require_consent, } @defer.inlineCallbacks @@ -88,6 +89,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): admin=content["admin"], user_type=content["user_type"], address=content["address"], + require_consent=content["require_consent"], ) return (200, {}) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 05ea1459e3..724231f364 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -525,6 +525,9 @@ class RegisterRestServlet(RestServlet): # downcased one in `username` for the mac calculation user = body["username"].encode("utf-8") + # do not require consent for this user (for example, bots) + require_consent = body.get("require_consent", True) + # str() because otherwise hmac complains that 'unicode' does not # have the buffer interface got_mac = str(body["mac"]) @@ -542,7 +545,7 @@ class RegisterRestServlet(RestServlet): raise SynapseError(403, "HMAC incorrect") user_id = yield self.registration_handler.register_user( - localpart=username, password=password + localpart=username, password=password, require_consent=require_consent, ) result = yield self._create_registration_details(user_id, body) -- cgit 1.4.1 From 27a686e53b8ba3f2e2f102fae73e598c00ec0086 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Thu, 22 Aug 2019 14:22:04 +0100 Subject: Do not send consent notices if "no-consent-required" is set --- synapse/server_notices/consent_server_notices.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 415e9c17d8..8e82ee32b2 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -80,6 +80,10 @@ class ConsentServerNotices(object): try: u = yield self._store.get_user_by_id(user_id) + if u["consent_version"] == "no-consent-required": + # user is exempt + return + if u["is_guest"] and not self._send_to_guests: # don't send to guests return -- cgit 1.4.1 From 1c5b8c622248d4ee3b38b01a997eaa8844859beb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 22 Aug 2019 14:47:34 +0100 Subject: Revert "Add "require_consent" parameter for registration" This reverts commit 3320aaab3a9bba3f5872371aba7053b41af9d0a0. --- synapse/handlers/register.py | 14 ++------------ synapse/replication/http/register.py | 2 -- synapse/rest/client/v2_alpha/register.py | 5 +---- 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 5c92960d25..4631fab94e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -150,7 +150,6 @@ class RegistrationHandler(BaseHandler): threepid=None, user_type=None, default_display_name=None, - require_consent=True, address=None, bind_emails=[], ): @@ -168,7 +167,6 @@ class RegistrationHandler(BaseHandler): will be set to this. Defaults to 'localpart'. address (str|None): the IP address used to perform the registration. bind_emails (List[str]): list of emails to bind to this account. - require_consent (bool): Should the user be required to give consent. 
Returns: Deferred[str]: user_id Raises: @@ -213,7 +211,6 @@ class RegistrationHandler(BaseHandler): admin=admin, user_type=user_type, address=address, - require_consent=require_consent, ) if self.hs.config.user_directory_search_all_users: @@ -247,7 +244,7 @@ class RegistrationHandler(BaseHandler): user_id = None attempts += 1 - if not self.hs.config.user_consent_at_registration and require_consent: + if not self.hs.config.user_consent_at_registration: yield self._auto_join_rooms(user_id) else: logger.info( @@ -528,7 +525,6 @@ class RegistrationHandler(BaseHandler): ratelimit=False, ) - @defer.inlineCallbacks def register_with_store( self, user_id, @@ -540,7 +536,6 @@ class RegistrationHandler(BaseHandler): admin=False, user_type=None, address=None, - require_consent=True, ): """Register user in the datastore. @@ -558,7 +553,7 @@ class RegistrationHandler(BaseHandler): user_type (str|None): type of user. One of the values from api.constants.UserTypes, or None for a normal user. address (str|None): the IP address used to perform the registration. - require_consent (bool): Should the user be required to give consent. + Returns: Deferred """ @@ -589,12 +584,8 @@ class RegistrationHandler(BaseHandler): admin=admin, user_type=user_type, address=address, - require_consent=require_consent, ) else: - if require_consent is False: - yield self.store.user_set_consent_version(user_id, "no-consent-required") - return self.store.register_user( user_id=user_id, password_hash=password_hash, @@ -606,7 +597,6 @@ class RegistrationHandler(BaseHandler): user_type=user_type, ) - @defer.inlineCallbacks def register_device(self, user_id, device_id, initial_display_name, is_guest=False): """Register a device for a user and generate an access token. diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 65702de082..3341320a87 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -72,7 +72,6 @@ class ReplicationRegisterServlet(ReplicationEndpoint): "admin": admin, "user_type": user_type, "address": address, - "require_consent": require_consent, } @defer.inlineCallbacks @@ -89,7 +88,6 @@ class ReplicationRegisterServlet(ReplicationEndpoint): admin=content["admin"], user_type=content["user_type"], address=content["address"], - require_consent=content["require_consent"], ) return (200, {}) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 724231f364..05ea1459e3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -525,9 +525,6 @@ class RegisterRestServlet(RestServlet): # downcased one in `username` for the mac calculation user = body["username"].encode("utf-8") - # do not require consent for this user (for example, bots) - require_consent = body.get("require_consent", True) - # str() because otherwise hmac complains that 'unicode' does not # have the buffer interface got_mac = str(body["mac"]) @@ -545,7 +542,7 @@ class RegisterRestServlet(RestServlet): raise SynapseError(403, "HMAC incorrect") user_id = yield self.registration_handler.register_user( - localpart=username, password=password, require_consent=require_consent, + localpart=username, password=password ) result = yield self._create_registration_details(user_id, body) -- cgit 1.4.1 From dbd46decad5f47208171b73949714d9dcb1a87b1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 22 Aug 2019 14:47:43 +0100 Subject: Revert "Do not send consent notices if 
"no-consent-required" is set" This reverts commit 27a686e53b8ba3f2e2f102fae73e598c00ec0086. --- synapse/server_notices/consent_server_notices.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 8e82ee32b2..415e9c17d8 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -80,10 +80,6 @@ class ConsentServerNotices(object): try: u = yield self._store.get_user_by_id(user_id) - if u["consent_version"] == "no-consent-required": - # user is exempt - return - if u["is_guest"] and not self._send_to_guests: # don't send to guests return -- cgit 1.4.1 From 0bab582fd6f4b42b64ecf09f5d8dbab568172d55 Mon Sep 17 00:00:00 2001 From: Manuel Stahl Date: Tue, 23 Jul 2019 11:55:18 +0200 Subject: Remove shared secret registration from client/r0/register endpoint This type of registration was probably never used. It only includes the user name in the HMAC but not the password. Shared secret registration is still available via client/r0/admin/register. Signed-off-by: Manuel Stahl --- changelog.d/5877.removal | 1 + synapse/rest/client/v2_alpha/register.py | 57 +++----------------------------- 2 files changed, 5 insertions(+), 53 deletions(-) create mode 100644 changelog.d/5877.removal diff --git a/changelog.d/5877.removal b/changelog.d/5877.removal new file mode 100644 index 0000000000..b6d84fb401 --- /dev/null +++ b/changelog.d/5877.removal @@ -0,0 +1 @@ +Remove shared secret registration from client/r0/register endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 05ea1459e3..9510a1e2b0 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -16,7 +16,6 @@ import hmac import logging -from hashlib import sha1 from six import string_types @@ -239,14 +238,12 @@ class RegisterRestServlet(RestServlet): # we do basic sanity checks here because the auth layer will store these # in sessions. Pull out the username/password provided to us. - desired_password = None if "password" in body: if ( not isinstance(body["password"], string_types) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") - desired_password = body["password"] desired_username = None if "username" in body: @@ -261,8 +258,8 @@ class RegisterRestServlet(RestServlet): if self.auth.has_access_token(request): appservice = yield self.auth.get_appservice_by_req(request) - # fork off as soon as possible for ASes and shared secret auth which - # have completely different registration flows to normal users + # fork off as soon as possible for ASes which have completely + # different registration flows to normal users # == Application Service Registration == if appservice: @@ -285,8 +282,8 @@ class RegisterRestServlet(RestServlet): return (200, result) # we throw for non 200 responses return - # for either shared secret or regular registration, downcase the - # provided username before attempting to register it. This should mean + # for regular registration, downcase the provided username before + # attempting to register it. This should mean # that people who try to register with upper-case in their usernames # don't get a nasty surprise. 
(Note that we treat username
        # case-insensitively in login, so they are free to carry on imagining
        # that the upper-case version is in use.)
        if desired_username is not None:
            desired_username = desired_username.lower()
 
-        # == Shared Secret Registration == (e.g. create new user scripts)
-        if "mac" in body:
-            # FIXME: Should we really be determining if this is shared secret
-            # auth based purely on the 'mac' key?
-            result = yield self._do_shared_secret_registration(
-                desired_username, desired_password, body
-            )
-            return (200, result)  # we throw for non 200 responses
-            return
-
         # == Normal User Registration == (everyone else)
         if not self.hs.config.enable_registration:
             raise SynapseError(403, "Registration has been disabled")
@@ -512,42 +499,6 @@ class RegisterRestServlet(RestServlet):
         )
         return (yield self._create_registration_details(user_id, body))
 
-    @defer.inlineCallbacks
-    def _do_shared_secret_registration(self, username, password, body):
-        if not self.hs.config.registration_shared_secret:
-            raise SynapseError(400, "Shared secret registration is not enabled")
-        if not username:
-            raise SynapseError(
-                400, "username must be specified", errcode=Codes.BAD_JSON
-            )
-
-        # use the username from the original request rather than the
-        # downcased one in `username` for the mac calculation
-        user = body["username"].encode("utf-8")
-
-        # str() because otherwise hmac complains that 'unicode' does not
-        # have the buffer interface
-        got_mac = str(body["mac"])
-
-        # FIXME this is different to the /v1/register endpoint, which
-        # includes the password and admin flag in the hashed text. Why are
-        # these different?
-        want_mac = hmac.new(
-            key=self.hs.config.registration_shared_secret.encode(),
-            msg=user,
-            digestmod=sha1,
-        ).hexdigest()
-
-        if not compare_digest(want_mac, got_mac):
-            raise SynapseError(403, "HMAC incorrect")
-
-        user_id = yield self.registration_handler.register_user(
-            localpart=username, password=password
-        )
-
-        result = yield self._create_registration_details(user_id, body)
-        return result
-
     @defer.inlineCallbacks
     def _create_registration_details(self, user_id, params):
         """Complete registration of newly-registered user
-- cgit 1.4.1

From 812ed6b0d5b2c682d8032fc83e3041a9da93f670 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens
Date: Thu, 22 Aug 2019 18:08:07 +0100
Subject: Opentracing across workers (#5771)

Propagate opentracing contexts across workers.

Also includes some convenience modifications to opentracing for
servlets, notably:

- Add a boolean to skip the whitelisting check on the inject/extract
  methods. This is useful when injecting into carriers locally, since
  otherwise we'd always have to include our own server name and
  whitelist it.

- Add start_active_span_from_request, which takes the whole request
  rather than just its headers.

- Add a boolean to decide whether to extract a span context from a
  request to a servlet.
---
 changelog.d/5771.feature | 1 +
 synapse/federation/transport/server.py | 43 ++++++----
 synapse/http/servlet.py | 2 +-
 synapse/logging/opentracing.py | 144 ++++++++++++++++++---------------
 synapse/replication/http/_base.py | 16 +++-
 5 files changed, 123 insertions(+), 83 deletions(-)
 create mode 100644 changelog.d/5771.feature

diff --git a/changelog.d/5771.feature b/changelog.d/5771.feature
new file mode 100644
index 0000000000..f2f4de1fdd
--- /dev/null
+++ b/changelog.d/5771.feature
@@ -0,0 +1 @@
+Make Opentracing work in worker mode.
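(Editor's note: the cross-worker propagation implemented below boils down to injecting the active span context into a plain dict that travels with the replication request, then extracting it on the receiving worker. A minimal sketch using the raw opentracing API, independent of Synapse's helper functions.)

```python
import opentracing


def inject_context(carrier):
    # Serialise the active span's context into a text map (a plain dict),
    # e.g. for embedding in an outgoing replication request body.
    span = opentracing.tracer.active_span
    if span is not None:
        opentracing.tracer.inject(span.context, opentracing.Format.TEXT_MAP, carrier)


def start_span_from_carrier(carrier, operation_name):
    # Rebuild the caller's span context on the receiving worker and start
    # a new active span as its child.
    context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
    return opentracing.tracer.start_active_span(operation_name, child_of=context)
```

Synapse's `inject_active_span_text_map` and friends, diffed below, wrap this same pattern and add the destination whitelist check.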
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a17148fc3c..dc53b4b170 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -38,7 +38,12 @@ from synapse.http.servlet import ( parse_string_from_args, ) from synapse.logging.context import run_in_background -from synapse.logging.opentracing import start_active_span_from_context, tags +from synapse.logging.opentracing import ( + start_active_span, + start_active_span_from_request, + tags, + whitelisted_homeserver, +) from synapse.types import ThirdPartyInstanceID, get_domain_from_id from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string @@ -288,20 +293,28 @@ class BaseFederationServlet(object): logger.warn("authenticate_request failed: %s", e) raise - # Start an opentracing span - with start_active_span_from_context( - request.requestHeaders, - "incoming-federation-request", - tags={ - "request_id": request.get_request_id(), - tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, - tags.HTTP_METHOD: request.get_method(), - tags.HTTP_URL: request.get_redacted_uri(), - tags.PEER_HOST_IPV6: request.getClientIP(), - "authenticated_entity": origin, - "servlet_name": request.request_metrics.name, - }, - ): + request_tags = { + "request_id": request.get_request_id(), + tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, + tags.HTTP_METHOD: request.get_method(), + tags.HTTP_URL: request.get_redacted_uri(), + tags.PEER_HOST_IPV6: request.getClientIP(), + "authenticated_entity": origin, + "servlet_name": request.request_metrics.name, + } + + # Only accept the span context if the origin is authenticated + # and whitelisted + if origin and whitelisted_homeserver(origin): + scope = start_active_span_from_request( + request, "incoming-federation-request", tags=request_tags + ) + else: + scope = start_active_span( + "incoming-federation-request", tags=request_tags + ) + + with scope: if origin: with ratelimiter.ratelimit(origin) as d: await d diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index fd07bf7b8e..c186b31f59 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -300,7 +300,7 @@ class RestServlet(object): http_server.register_paths( method, patterns, - trace_servlet(servlet_classname, method_handler), + trace_servlet(servlet_classname)(method_handler), servlet_classname, ) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 6b706e1892..4abea4474b 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -174,10 +174,48 @@ from twisted.internet import defer from synapse.config import ConfigError +# Helper class + + +class _DummyTagNames(object): + """wrapper of opentracings tags. We need to have them if we + want to reference them without opentracing around. Clearly they + should never actually show up in a trace. 
`set_tags` overwrites + these with the correct ones.""" + + INVALID_TAG = "invalid-tag" + COMPONENT = INVALID_TAG + DATABASE_INSTANCE = INVALID_TAG + DATABASE_STATEMENT = INVALID_TAG + DATABASE_TYPE = INVALID_TAG + DATABASE_USER = INVALID_TAG + ERROR = INVALID_TAG + HTTP_METHOD = INVALID_TAG + HTTP_STATUS_CODE = INVALID_TAG + HTTP_URL = INVALID_TAG + MESSAGE_BUS_DESTINATION = INVALID_TAG + PEER_ADDRESS = INVALID_TAG + PEER_HOSTNAME = INVALID_TAG + PEER_HOST_IPV4 = INVALID_TAG + PEER_HOST_IPV6 = INVALID_TAG + PEER_PORT = INVALID_TAG + PEER_SERVICE = INVALID_TAG + SAMPLING_PRIORITY = INVALID_TAG + SERVICE = INVALID_TAG + SPAN_KIND = INVALID_TAG + SPAN_KIND_CONSUMER = INVALID_TAG + SPAN_KIND_PRODUCER = INVALID_TAG + SPAN_KIND_RPC_CLIENT = INVALID_TAG + SPAN_KIND_RPC_SERVER = INVALID_TAG + + try: import opentracing + + tags = opentracing.tags except ImportError: opentracing = None + tags = _DummyTagNames try: from jaeger_client import Config as JaegerConfig from synapse.logging.scopecontextmanager import LogContextScopeManager @@ -252,10 +290,6 @@ def init_tracer(config): scope_manager=LogContextScopeManager(config), ).initialize_tracer() - # Set up tags to be opentracing's tags - global tags - tags = opentracing.tags - # Whitelisting @@ -334,8 +368,8 @@ def start_active_span_follows_from(operation_name, contexts): return scope -def start_active_span_from_context( - headers, +def start_active_span_from_request( + request, operation_name, references=None, tags=None, @@ -344,9 +378,9 @@ def start_active_span_from_context( finish_on_close=True, ): """ - Extracts a span context from Twisted Headers. + Extracts a span context from a Twisted Request. args: - headers (twisted.web.http_headers.Headers) + headers (twisted.web.http.Request) For the other args see opentracing.tracer @@ -360,7 +394,9 @@ def start_active_span_from_context( if opentracing is None: return _noop_context_manager() - header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()} + header_dict = { + k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() + } context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) return opentracing.tracer.start_active_span( @@ -448,7 +484,7 @@ def set_operation_name(operation_name): @only_if_tracing -def inject_active_span_twisted_headers(headers, destination): +def inject_active_span_twisted_headers(headers, destination, check_destination=True): """ Injects a span context into twisted headers in-place @@ -467,7 +503,7 @@ def inject_active_span_twisted_headers(headers, destination): https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py """ - if not whitelisted_homeserver(destination): + if check_destination and not whitelisted_homeserver(destination): return span = opentracing.tracer.active_span @@ -479,7 +515,7 @@ def inject_active_span_twisted_headers(headers, destination): @only_if_tracing -def inject_active_span_byte_dict(headers, destination): +def inject_active_span_byte_dict(headers, destination, check_destination=True): """ Injects a span context into a dict where the headers are encoded as byte strings @@ -511,7 +547,7 @@ def inject_active_span_byte_dict(headers, destination): @only_if_tracing -def inject_active_span_text_map(carrier, destination=None): +def inject_active_span_text_map(carrier, destination, check_destination=True): """ Injects a span context into a dict @@ -532,7 +568,7 @@ def inject_active_span_text_map(carrier, destination=None): 
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py """ - if destination and not whitelisted_homeserver(destination): + if check_destination and not whitelisted_homeserver(destination): return opentracing.tracer.inject( @@ -689,65 +725,43 @@ def tag_args(func): return _tag_args_inner -def trace_servlet(servlet_name, func): +def trace_servlet(servlet_name, extract_context=False): """Decorator which traces a serlet. It starts a span with some servlet specific - tags such as the servlet_name and request information""" - if not opentracing: - return func + tags such as the servlet_name and request information - @wraps(func) - @defer.inlineCallbacks - def _trace_servlet_inner(request, *args, **kwargs): - with start_active_span( - "incoming-client-request", - tags={ + Args: + servlet_name (str): The name to be used for the span's operation_name + extract_context (bool): Whether to attempt to extract the opentracing + context from the request the servlet is handling. + + """ + + def _trace_servlet_inner_1(func): + if not opentracing: + return func + + @wraps(func) + @defer.inlineCallbacks + def _trace_servlet_inner(request, *args, **kwargs): + request_tags = { "request_id": request.get_request_id(), tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), tags.PEER_HOST_IPV6: request.getClientIP(), - "servlet_name": servlet_name, - }, - ): - result = yield defer.maybeDeferred(func, request, *args, **kwargs) - return result - - return _trace_servlet_inner - - -# Helper class - + } -class _DummyTagNames(object): - """wrapper of opentracings tags. We need to have them if we - want to reference them without opentracing around. Clearly they - should never actually show up in a trace. 
`set_tags` overwrites - these with the correct ones.""" + if extract_context: + scope = start_active_span_from_request( + request, servlet_name, tags=request_tags + ) + else: + scope = start_active_span(servlet_name, tags=request_tags) - INVALID_TAG = "invalid-tag" - COMPONENT = INVALID_TAG - DATABASE_INSTANCE = INVALID_TAG - DATABASE_STATEMENT = INVALID_TAG - DATABASE_TYPE = INVALID_TAG - DATABASE_USER = INVALID_TAG - ERROR = INVALID_TAG - HTTP_METHOD = INVALID_TAG - HTTP_STATUS_CODE = INVALID_TAG - HTTP_URL = INVALID_TAG - MESSAGE_BUS_DESTINATION = INVALID_TAG - PEER_ADDRESS = INVALID_TAG - PEER_HOSTNAME = INVALID_TAG - PEER_HOST_IPV4 = INVALID_TAG - PEER_HOST_IPV6 = INVALID_TAG - PEER_PORT = INVALID_TAG - PEER_SERVICE = INVALID_TAG - SAMPLING_PRIORITY = INVALID_TAG - SERVICE = INVALID_TAG - SPAN_KIND = INVALID_TAG - SPAN_KIND_CONSUMER = INVALID_TAG - SPAN_KIND_PRODUCER = INVALID_TAG - SPAN_KIND_RPC_CLIENT = INVALID_TAG - SPAN_KIND_RPC_SERVER = INVALID_TAG + with scope: + result = yield defer.maybeDeferred(func, request, *args, **kwargs) + return result + return _trace_servlet_inner -tags = _DummyTagNames + return _trace_servlet_inner_1 diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 2e0594e581..c4be9273f6 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -22,6 +22,7 @@ from six.moves import urllib from twisted.internet import defer +import synapse.logging.opentracing as opentracing from synapse.api.errors import ( CodeMessageException, HttpResponseException, @@ -165,8 +166,12 @@ class ReplicationEndpoint(object): # have a good idea that the request has either succeeded or failed on # the master, and so whether we should clean up or not. while True: + headers = {} + opentracing.inject_active_span_byte_dict( + headers, None, check_destination=False + ) try: - result = yield request_func(uri, data) + result = yield request_func(uri, data, headers=headers) break except CodeMessageException as e: if e.code != 504 or not cls.RETRY_ON_TIMEOUT: @@ -205,7 +210,14 @@ class ReplicationEndpoint(object): args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args) pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args)) - http_server.register_paths(method, [pattern], handler, self.__class__.__name__) + http_server.register_paths( + method, + [pattern], + opentracing.trace_servlet(self.__class__.__name__, extract_context=True)( + handler + ), + self.__class__.__name__, + ) def _cached_handler(self, request, txn_id, **kwargs): """Called on new incoming requests when caching is enabled. 
Checks -- cgit 1.4.1 From 8767b63a821eb8612e2ab830534fd6f40eb1aaaa Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Thu, 22 Aug 2019 18:21:10 +0100 Subject: Propagate opentracing contexts through EDUs (#5852) Propagate opentracing contexts through EDUs Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5852.feature | 1 + docs/opentracing.rst | 27 +++- synapse/federation/federation_server.py | 15 +- synapse/federation/sender/transaction_manager.py | 170 ++++++++++++--------- synapse/federation/units.py | 3 + synapse/handlers/devicemessage.py | 27 +++- synapse/logging/opentracing.py | 26 ++++ synapse/storage/devices.py | 39 ++++- .../schema/delta/56/add_spans_to_device_lists.sql | 20 +++ 9 files changed, 234 insertions(+), 94 deletions(-) create mode 100644 changelog.d/5852.feature create mode 100644 synapse/storage/schema/delta/56/add_spans_to_device_lists.sql diff --git a/changelog.d/5852.feature b/changelog.d/5852.feature new file mode 100644 index 0000000000..4a0fc6c542 --- /dev/null +++ b/changelog.d/5852.feature @@ -0,0 +1 @@ +Pass opentracing contexts between servers when transmitting EDUs. diff --git a/docs/opentracing.rst b/docs/opentracing.rst index b91a2208a8..6e98ab56ba 100644 --- a/docs/opentracing.rst +++ b/docs/opentracing.rst @@ -32,7 +32,7 @@ It is up to the remote server to decide what it does with the spans it creates. This is called the sampling policy and it can be configured through Jaeger's settings. -For OpenTracing concepts see +For OpenTracing concepts see https://opentracing.io/docs/overview/what-is-tracing/. For more information about Jaeger's implementation see @@ -79,7 +79,7 @@ Homeserver whitelisting The homeserver whitelist is configured using regular expressions. A list of regular expressions can be given and their union will be compared when propagating any -spans contexts to another homeserver. +spans contexts to another homeserver. Though it's mostly safe to send and receive span contexts to and from untrusted users since span contexts are usually opaque ids it can lead to @@ -92,6 +92,29 @@ two problems, namely: but that doesn't prevent another server sending you baggage which will be logged to OpenTracing's logs. +========== +EDU FORMAT +========== + +EDUs can contain tracing data in their content. This is not specced but +it could be of interest for other homeservers. + +EDU format (if you're using jaeger): + +.. code-block:: json + + { + "edu_type": "type", + "content": { + "org.matrix.opentracing_context": { + "uber-trace-id": "fe57cf3e65083289" + } + } + } + +Though you don't have to use jaeger you must inject the span context into +`org.matrix.opentracing_context` using the opentracing `Format.TEXT_MAP` inject method. 
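As a concrete illustration of the sending side of this format, here is a hedged sketch using the opentracing Python API. It assumes an initialised tracer with an active span; the payload key other than ``org.matrix.opentracing_context`` is invented for the example:

.. code-block:: python

    import json

    import opentracing

    # Inject the active span's context into a plain dict carrier...
    carrier = {}
    opentracing.tracer.inject(
        opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
    )

    # ...and embed it, json-encoded, in the EDU content.
    edu_content = {
        "some_key": "some_value",  # hypothetical EDU payload
        "org.matrix.opentracing_context": json.dumps(carrier),
    }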
+ ================== Configuring Jaeger ================== diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 9286ca3202..05fd49f3c1 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -43,7 +43,7 @@ from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name from synapse.logging.context import nested_logging_context -from synapse.logging.opentracing import log_kv, trace +from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, @@ -811,12 +811,13 @@ class FederationHandlerRegistry(object): if not handler: logger.warn("No handler registered for EDU type %s", edu_type) - try: - yield handler(origin, content) - except SynapseError as e: - logger.info("Failed to handle edu %r: %r", edu_type, e) - except Exception: - logger.exception("Failed to handle edu %r", edu_type) + with start_active_span_from_edu(content, "handle_edu"): + try: + yield handler(origin, content) + except SynapseError as e: + logger.info("Failed to handle edu %r: %r", edu_type, e) + except Exception: + logger.exception("Failed to handle edu %r", edu_type) def on_query(self, query_type, args): handler = self.query_handlers.get(query_type) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 52706302f2..62ca6a3e87 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -14,11 +14,19 @@ # limitations under the License. import logging +from canonicaljson import json + from twisted.internet import defer from synapse.api.errors import HttpResponseException from synapse.federation.persistence import TransactionActions from synapse.federation.units import Transaction +from synapse.logging.opentracing import ( + extract_text_map, + set_tag, + start_active_span_follows_from, + tags, +) from synapse.util.metrics import measure_func logger = logging.getLogger(__name__) @@ -44,93 +52,109 @@ class TransactionManager(object): @defer.inlineCallbacks def send_new_transaction(self, destination, pending_pdus, pending_edus): - # Sort based on the order field - pending_pdus.sort(key=lambda t: t[1]) - pdus = [x[0] for x in pending_pdus] - edus = pending_edus + # Make a transaction-sending opentracing span. This span follows on from + # all the edus in that transaction. This needs to be done since there is + # no active span here, so if the edus were not received by the remote the + # span would have no causality and it would be forgotten. + # The span_contexts is a generator so that it won't be evaluated if + # opentracing is disabled. (Yay speed!) 
- success = True + span_contexts = ( + extract_text_map(json.loads(edu.get_context())) for edu in pending_edus + ) - logger.debug("TX [%s] _attempt_new_transaction", destination) + with start_active_span_follows_from("send_transaction", span_contexts): - txn_id = str(self._next_txn_id) + # Sort based on the order field + pending_pdus.sort(key=lambda t: t[1]) + pdus = [x[0] for x in pending_pdus] + edus = pending_edus - logger.debug( - "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)", - destination, - txn_id, - len(pdus), - len(edus), - ) + success = True - transaction = Transaction.create_new( - origin_server_ts=int(self.clock.time_msec()), - transaction_id=txn_id, - origin=self._server_name, - destination=destination, - pdus=pdus, - edus=edus, - ) + logger.debug("TX [%s] _attempt_new_transaction", destination) - self._next_txn_id += 1 + txn_id = str(self._next_txn_id) - logger.info( - "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", - destination, - txn_id, - transaction.transaction_id, - len(pdus), - len(edus), - ) + logger.debug( + "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)", + destination, + txn_id, + len(pdus), + len(edus), + ) - # Actually send the transaction - - # FIXME (erikj): This is a bit of a hack to make the Pdu age - # keys work - def json_data_cb(): - data = transaction.get_dict() - now = int(self.clock.time_msec()) - if "pdus" in data: - for p in data["pdus"]: - if "age_ts" in p: - unsigned = p.setdefault("unsigned", {}) - unsigned["age"] = now - int(p["age_ts"]) - del p["age_ts"] - return data - - try: - response = yield self._transport_layer.send_transaction( - transaction, json_data_cb + transaction = Transaction.create_new( + origin_server_ts=int(self.clock.time_msec()), + transaction_id=txn_id, + origin=self._server_name, + destination=destination, + pdus=pdus, + edus=edus, ) - code = 200 - except HttpResponseException as e: - code = e.code - response = e.response - if e.code in (401, 404, 429) or 500 <= e.code: - logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) - raise e + self._next_txn_id += 1 - logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) + logger.info( + "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", + destination, + txn_id, + transaction.transaction_id, + len(pdus), + len(edus), + ) - if code == 200: - for e_id, r in response.get("pdus", {}).items(): - if "error" in r: + # Actually send the transaction + + # FIXME (erikj): This is a bit of a hack to make the Pdu age + # keys work + def json_data_cb(): + data = transaction.get_dict() + now = int(self.clock.time_msec()) + if "pdus" in data: + for p in data["pdus"]: + if "age_ts" in p: + unsigned = p.setdefault("unsigned", {}) + unsigned["age"] = now - int(p["age_ts"]) + del p["age_ts"] + return data + + try: + response = yield self._transport_layer.send_transaction( + transaction, json_data_cb + ) + code = 200 + except HttpResponseException as e: + code = e.code + response = e.response + + if e.code in (401, 404, 429) or 500 <= e.code: + logger.info( + "TX [%s] {%s} got %d response", destination, txn_id, code + ) + raise e + + logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) + + if code == 200: + for e_id, r in response.get("pdus", {}).items(): + if "error" in r: + logger.warn( + "TX [%s] {%s} Remote returned error for %s: %s", + destination, + txn_id, + e_id, + r, + ) + else: + for p in pdus: logger.warn( - "TX [%s] {%s} Remote returned error for %s: %s", + "TX [%s] {%s} 
Failed to send event %s", destination, txn_id, - e_id, - r, + p.event_id, ) - else: - for p in pdus: - logger.warn( - "TX [%s] {%s} Failed to send event %s", - destination, - txn_id, - p.event_id, - ) - success = False + success = False - return success + set_tag(tags.ERROR, not success) + return success diff --git a/synapse/federation/units.py b/synapse/federation/units.py index 14aad8f09d..aa84621206 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -38,6 +38,9 @@ class Edu(JsonEncodedObject): internal_keys = ["origin", "destination"] + def get_context(self): + return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}") + class Transaction(JsonEncodedObject): """ A transaction is a list of Pdus and Edus to be sent to a remote home diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index e1ebb6346c..c7d56779b8 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -15,9 +15,17 @@ import logging +from canonicaljson import json + from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.logging.opentracing import ( + get_active_span_text_map, + set_tag, + start_active_span, + whitelisted_homeserver, +) from synapse.types import UserID, get_domain_from_id from synapse.util.stringutils import random_string @@ -100,14 +108,21 @@ class DeviceMessageHandler(object): message_id = random_string(16) + context = get_active_span_text_map() + remote_edu_contents = {} for destination, messages in remote_messages.items(): - remote_edu_contents[destination] = { - "messages": messages, - "sender": sender_user_id, - "type": message_type, - "message_id": message_id, - } + with start_active_span("to_device_for_user"): + set_tag("destination", destination) + remote_edu_contents[destination] = { + "messages": messages, + "sender": sender_user_id, + "type": message_type, + "message_id": message_id, + "org.matrix.opentracing_context": json.dumps(context) + if whitelisted_homeserver(destination) + else None, + } stream_id = yield self.store.add_messages_to_device_inbox( local_messages, remote_edu_contents diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 4abea4474b..dd296027a1 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -149,6 +149,9 @@ unchartered waters will require the enforcement of the whitelist. ``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes in a destination and compares it to the whitelist. +Most injection methods take a 'destination' arg. The context will only be injected +if the destination matches the whitelist or the destination is None. + ======= Gotchas ======= @@ -576,6 +579,29 @@ def inject_active_span_text_map(carrier, destination, check_destination=True): ) +def get_active_span_text_map(destination=None): + """ + Gets a span context as a dict. This can be used instead of manually + injecting a span into an empty carrier. + + Args: + destination (str): the name of the remote server. + + Returns: + dict: the active span's context if opentracing is enabled, otherwise empty. 
+ """ + + if not opentracing or (destination and not whitelisted_homeserver(destination)): + return {} + + carrier = {} + opentracing.tracer.inject( + opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + ) + + return carrier + + def active_span_context_as_string(): """ Returns: diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 8f72d92895..e11881161d 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -21,6 +21,11 @@ from canonicaljson import json from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.opentracing import ( + get_active_span_text_map, + trace, + whitelisted_homeserver, +) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import Cache, SQLBaseStore, db_to_json from synapse.storage.background_updates import BackgroundUpdateStore @@ -73,6 +78,7 @@ class DeviceWorkerStore(SQLBaseStore): return {d["device_id"]: d for d in devices} + @trace @defer.inlineCallbacks def get_devices_by_remote(self, destination, from_stream_id, limit): """Get stream of updates to send to remote servers @@ -127,8 +133,15 @@ class DeviceWorkerStore(SQLBaseStore): # (user_id, device_id) entries into a map, with the value being # the max stream_id across each set of duplicate entries # - # maps (user_id, device_id) -> stream_id + # maps (user_id, device_id) -> (stream_id, opentracing_context) # as long as their stream_id does not match that of the last row + # + # opentracing_context contains the opentracing metadata for the request + # that created the poke + # + # The most recent request's opentracing_context is used as the + # context which created the Edu. + query_map = {} for update in updates: if stream_id_cutoff is not None and update[2] >= stream_id_cutoff: @@ -136,7 +149,14 @@ class DeviceWorkerStore(SQLBaseStore): break key = (update[0], update[1]) - query_map[key] = max(query_map.get(key, 0), update[2]) + + update_context = update[3] + update_stream_id = update[2] + + previous_update_stream_id, _ = query_map.get(key, (0, None)) + + if update_stream_id > previous_update_stream_id: + query_map[key] = (update_stream_id, update_context) # If we didn't find any updates with a stream_id lower than the cutoff, it # means that there are more than limit updates all of which have the same @@ -171,7 +191,7 @@ class DeviceWorkerStore(SQLBaseStore): List: List of device updates """ sql = """ - SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes + SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ? ORDER BY stream_id LIMIT ? 
@@ -187,8 +207,9 @@ class DeviceWorkerStore(SQLBaseStore): Args: destination (str): The host the device updates are intended for from_stream_id (int): The minimum stream_id to filter updates by, exclusive - query_map (Dict[(str, str): int]): Dictionary mapping - user_id/device_id to update stream_id + query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping + user_id/device_id to update stream_id and the relevant json-encoded + opentracing context Returns: List[Dict]: List of objects representing a device update EDU @@ -210,12 +231,13 @@ class DeviceWorkerStore(SQLBaseStore): destination, user_id, from_stream_id ) for device_id, device in iteritems(user_devices): - stream_id = query_map[(user_id, device_id)] + stream_id, opentracing_context = query_map[(user_id, device_id)] result = { "user_id": user_id, "device_id": device_id, "prev_id": [prev_id] if prev_id else [], "stream_id": stream_id, + "org.matrix.opentracing_context": opentracing_context, } prev_id = stream_id @@ -814,6 +836,8 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): ], ) + context = get_active_span_text_map() + self._simple_insert_many_txn( txn, table="device_lists_outbound_pokes", @@ -825,6 +849,9 @@ "device_id": device_id, "sent": False, "ts": now, + "opentracing_context": json.dumps(context) + if whitelisted_homeserver(destination) + else None, } for destination in hosts for device_id in device_ids diff --git a/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql new file mode 100644 index 0000000000..41807eb1e7 --- /dev/null +++ b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql @@ -0,0 +1,20 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Opentracing context data for inclusion in the device_list_update EDUs, as a + * json-encoded dictionary. NULL if opentracing is disabled (or not enabled for this destination).
+ */ +ALTER TABLE device_lists_outbound_pokes ADD opentracing_context TEXT; -- cgit 1.4.1 From 886eceba3e134325e6943aa63eda0a70a7aca911 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:14:52 +0100 Subject: Return user_type in get_user_by_id --- synapse/handlers/message.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a5e23c4caf..97a89fe882 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -24,7 +24,7 @@ from twisted.internet import defer from twisted.internet.defer import succeed from synapse import event_auth -from synapse.api.constants import EventTypes, Membership, RelationTypes +from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes from synapse.api.errors import ( AuthError, Codes, @@ -469,6 +469,9 @@ class EventCreationHandler(object): u = yield self.store.get_user_by_id(user_id) assert u is not None + if u["user_type"] == UserTypes.SUPPORT: + # support users are not required to consent + return if u["appservice_id"] is not None: # users registered by an appservice are exempt return -- cgit 1.4.1 From ae38e0569fdb592875b45d4b37f200af5f6a86fc Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:15:10 +0100 Subject: Ignore consent for support users --- synapse/storage/registration.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 55e4e84d71..938fd00717 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -56,6 +56,7 @@ class RegistrationWorkerStore(SQLBaseStore): "consent_server_notice_sent", "appservice_id", "creation_ts", + "user_type", ], allow_none=True, desc="get_user_by_id", -- cgit 1.4.1 From 80793e813c7c4024ce28ab9d56d00fd4b4812800 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:20:31 +0100 Subject: newsfile 5902 --- changelog.d/5902.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5902.feature diff --git a/changelog.d/5902.feature b/changelog.d/5902.feature new file mode 100644 index 0000000000..c1c5c97b18 --- /dev/null +++ b/changelog.d/5902.feature @@ -0,0 +1 @@ +Support users are no longer required to consent. 
\ No newline at end of file -- cgit 1.4.1 From 0fb5189072f61416e7f616aa9d90d8c981e6b8b3 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:25:35 +0100 Subject: Fix registration test --- tests/storage/test_registration.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 0253c4ac05..4578cc3b60 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -49,6 +49,7 @@ class RegistrationStoreTestCase(unittest.TestCase): "consent_server_notice_sent": None, "appservice_id": None, "creation_ts": 1000, + "user_type": None, }, (yield self.store.get_user_by_id(self.user_id)), ) -- cgit 1.4.1 From d9b8cf81be1a1132d29e89e3b7d2451886783163 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:52:09 +0100 Subject: Add bot type --- synapse/api/constants.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 3ffde0d7fc..f29bce560c 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -122,7 +122,8 @@ class UserTypes(object): """ SUPPORT = "support" - ALL_USER_TYPES = (SUPPORT,) + BOT = "bot" + ALL_USER_TYPES = (SUPPORT, BOT) class RelationTypes(object): -- cgit 1.4.1 From 971c980c6ee4a396a94987c85e5b8db01ceed0d0 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:53:48 +0100 Subject: Add changelog --- changelog.d/5903.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5903.feature diff --git a/changelog.d/5903.feature b/changelog.d/5903.feature new file mode 100644 index 0000000000..fc60d02107 --- /dev/null +++ b/changelog.d/5903.feature @@ -0,0 +1 @@ +Add bot user type. -- cgit 1.4.1 From 9ba32f6573a47368c1ac63ec769c20f73bcc2cc5 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:56:31 +0100 Subject: Exempt bot users --- synapse/handlers/message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 97a89fe882..c656cffc07 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -469,7 +469,7 @@ class EventCreationHandler(object): u = yield self.store.get_user_by_id(user_id) assert u is not None - if u["user_type"] == UserTypes.SUPPORT: + if u["user_type"] == UserTypes.SUPPORT or u["user_type"] == UserTypes.BOT: # support users are not required to consent return if u["appservice_id"] is not None: -- cgit 1.4.1 From 4a2d2c2b6f42cef95c7e70ecbc27e52b9976fd15 Mon Sep 17 00:00:00 2001 From: Half-Shot Date: Fri, 23 Aug 2019 09:57:07 +0100 Subject: Update changelog --- changelog.d/5902.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5902.feature b/changelog.d/5902.feature index c1c5c97b18..0660f65cfa 100644 --- a/changelog.d/5902.feature +++ b/changelog.d/5902.feature @@ -1 +1 @@ -Support users are no longer required to consent. \ No newline at end of file +Users with the type of "support" or "bot" are no longer required to consent. 
\ No newline at end of file -- cgit 1.4.1 From c998f250065bd3c0f51a6e6ac3b333ede98ed6a7 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Fri, 23 Aug 2019 10:28:54 +0100 Subject: Apply suggestions from code review Co-Authored-By: Erik Johnston --- synapse/handlers/message.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c656cffc07..111f7c7e2f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -469,8 +469,8 @@ class EventCreationHandler(object): u = yield self.store.get_user_by_id(user_id) assert u is not None - if u["user_type"] == UserTypes.SUPPORT or u["user_type"] == UserTypes.BOT: - # support users are not required to consent + if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT): + # support and bot users are not required to consent return if u["appservice_id"] is not None: # users registered by an appservice are exempt -- cgit 1.4.1 From 7af5a63063aa69888ab59ee997cc3d1459d25af4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Aug 2019 14:52:11 +0100 Subject: Fixup review comments --- docs/sample_config.yaml | 4 ++-- synapse/crypto/keyring.py | 4 ++-- synapse/rest/key/v2/remote_key_resource.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index c96eb0cf2d..ae1cafc5f3 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1029,8 +1029,8 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # - server_name: "matrix.org" # -# The additional signing keys to use when acting as a trusted key server, on -# top of the normal signing keys. +# The signing keys to use when acting as a trusted key server. If not specified +# defaults to the server signing key. # # Can contain multiple keys, one per line. 
# diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index abeb0ac26e..2d7434fb2f 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -539,7 +539,7 @@ class BaseV2KeyFetcher(object): verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) - signed_key_json_bytes = encode_canonical_json(response_json) + key_json_bytes = encode_canonical_json(response_json) yield make_deferred_yieldable( defer.gatherResults( @@ -551,7 +551,7 @@ class BaseV2KeyFetcher(object): from_server=from_server, ts_now_ms=time_added_ms, ts_expires_ms=ts_valid_until_ms, - key_json_bytes=signed_key_json_bytes, + key_json_bytes=key_json_bytes, ) for key_id in verify_keys ], diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index f3398c9523..55580bc59e 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -14,7 +14,7 @@ import logging -from canonicaljson import json +from canonicaljson import encode_canonical_json, json from signedjson.sign import sign_json from twisted.internet import defer @@ -227,4 +227,4 @@ class RemoteKey(DirectServeResource): results = {"server_keys": signed_keys} - respond_with_json_bytes(request, 200, json.dumps(results).encode("utf-8")) + respond_with_json_bytes(request, 200, encode_canonical_json(results)) -- cgit 1.4.1 From fe0ac98e6653903cce43b1c5a3be77ef4f626867 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Aug 2019 14:54:20 +0100 Subject: Don't implicitly include server signing key --- synapse/config/key.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/synapse/config/key.py b/synapse/config/key.py index f1a1efcb7f..ba2199bceb 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -85,14 +85,13 @@ class KeyConfig(Config): config.get("key_refresh_interval", "1d") ) - self.key_server_signing_keys = list(self.signing_key) key_server_signing_keys_path = config.get("key_server_signing_keys_path") if key_server_signing_keys_path: - self.key_server_signing_keys.extend( - self.read_signing_keys( - key_server_signing_keys_path, "key_server_signing_keys_path" - ) + self.key_server_signing_keys = self.read_signing_keys( + key_server_signing_keys_path, "key_server_signing_keys_path" ) + else: + self.key_server_signing_keys = list(self.signing_key) # if neither trusted_key_servers nor perspectives are given, use the default. if "perspectives" not in config and "trusted_key_servers" not in config: @@ -221,8 +220,8 @@ class KeyConfig(Config): # - server_name: "matrix.org" # - # The additional signing keys to use when acting as a trusted key server, on - # top of the normal signing keys. + # The signing keys to use when acting as a trusted key server. If not specified + # defaults to the server signing key. # # Can contain multiple keys, one per line. 
# -- cgit 1.4.1 From e70f0081da5bbea772316dffda5f173e9568d1d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Aug 2019 15:05:56 +0100 Subject: Fix logcontexts --- synapse/http/federation/matrix_federation_agent.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index a8815f078a..62883c06a4 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -28,7 +28,7 @@ from twisted.web.iweb import IAgent, IAgentEndpointFactory from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.http.federation.well_known_resolver import WellKnownResolver -from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util import Clock logger = logging.getLogger(__name__) @@ -158,8 +158,9 @@ class MatrixFederationAgent(object): if not headers.hasHeader(b"host"): headers.addRawHeader(b"host", parsed_uri.netloc) - with PreserveLoggingContext(): - res = yield self._agent.request(method, uri, headers, bodyProducer) + res = yield make_deferred_yieldable( + self._agent.request(method, uri, headers, bodyProducer) + ) return res @@ -214,11 +215,14 @@ class MatrixHostnameEndpoint(object): self._srv_resolver = srv_resolver - @defer.inlineCallbacks def connect(self, protocol_factory): """Implements IStreamClientEndpoint interface """ + return run_in_background(self._do_connect, protocol_factory) + + @defer.inlineCallbacks + def _do_connect(self, protocol_factory): first_exception = None server_list = yield self._resolve_server() -- cgit 1.4.1 From fbb758a7cef9282fee605eb6bc9f1b2d430d8d62 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Aug 2019 15:09:08 +0100 Subject: Fixup comments --- synapse/http/federation/matrix_federation_agent.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 62883c06a4..feae7de5be 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -192,12 +192,19 @@ class MatrixHostnameEndpointFactory(object): class MatrixHostnameEndpoint(object): """An endpoint that resolves matrix:// URLs using Matrix server name resolution (i.e. via SRV). Does not check for well-known delegation. + + Args: + reactor (IReactor) + tls_client_options_factory (ClientTLSOptionsFactory|None): + factory to use for fetching client tls options, or none to disable TLS. + srv_resolver (SrvResolver): The SRV resolver to use + parsed_uri (twisted.web.client.URI): The parsed URI that we're wanting + to connect to. """ def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri): self._reactor = reactor - # We reparse the URI so that defaultPort is -1 rather than 80 self._parsed_uri = parsed_uri # set up the TLS connection params @@ -272,6 +279,7 @@ class MatrixHostnameEndpoint(object): # before now, due to needing to rewrite the Host header of the HTTP # request. 
+ # We reparse the URI so that defaultPort is -1 rather than 80 parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes()) host = parsed_uri.hostname -- cgit 1.4.1 From 27d3fc421ab03361b03e4b9b4dd0d912b09412ba Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Sat, 24 Aug 2019 22:33:43 +0100 Subject: Increase max display name limit --- changelog.d/5906.feature | 1 + synapse/handlers/profile.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5906.feature diff --git a/changelog.d/5906.feature b/changelog.d/5906.feature new file mode 100644 index 0000000000..7c789510a6 --- /dev/null +++ b/changelog.d/5906.feature @@ -0,0 +1 @@ +Increase max display name size to 256. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 2cc237e6a5..8690f69d45 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -34,7 +34,7 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) -MAX_DISPLAYNAME_LEN = 100 +MAX_DISPLAYNAME_LEN = 256 MAX_AVATAR_URL_LEN = 1000 -- cgit 1.4.1 From e8e3e033eea2947c3746005f876afca55c601f1d Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 26 Aug 2019 21:01:47 -0500 Subject: public_base_url is actually public_baseurl Signed-off-by: Aaron Raimist --- synapse/config/emailconfig.py | 2 +- synapse/rest/well_known.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 36d01a10af..f83c05df44 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -115,7 +115,7 @@ class EmailConfig(Config): missing.append("email." + k) if config.get("public_baseurl") is None: - missing.append("public_base_url") + missing.append("public_baseurl") if len(missing) > 0: raise RuntimeError( diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index 5e8fda4b65..20177b44e7 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -34,7 +34,7 @@ class WellKnownBuilder(object): self._config = hs.config def get_well_known(self): - # if we don't have a public_base_url, we can't help much here. + # if we don't have a public_baseurl, we can't help much here. if self._config.public_baseurl is None: return None -- cgit 1.4.1 From c25137a99f9dd79ca3f712243997eb6da7614a2f Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 26 Aug 2019 21:06:08 -0500 Subject: Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5909.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5909.misc diff --git a/changelog.d/5909.misc b/changelog.d/5909.misc new file mode 100644 index 0000000000..73e35cc48d --- /dev/null +++ b/changelog.d/5909.misc @@ -0,0 +1 @@ +Fix error message which referred to public_base_url instead of public_baseurl. 
-- cgit 1.4.1 From aefa76f5cd70f20808947605e76e9570aaff58ed Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 27 Aug 2019 08:52:20 +0100 Subject: Allow schema deltas to be engine-specific Signed-off-by: Olivier Wilkinson (reivilibre) --- synapse/storage/prepare_database.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index d20eacda59..0270cd6f6c 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -238,6 +238,15 @@ def _upgrade_existing_database( logger.debug("applied_delta_files: %s", applied_delta_files) + if isinstance(database_engine, PostgresEngine): + specific_engine_extension = ".postgres" + else: + specific_engine_extension = ".sqlite" + + specific_engine_extensions = ( + ".sqlite", ".postgres" + ) + for v in range(start_ver, SCHEMA_VERSION + 1): logger.info("Upgrading schema to v%d", v) @@ -274,15 +283,22 @@ def _upgrade_existing_database( # Sometimes .pyc files turn up anyway even though we've # disabled their generation; e.g. from distribution package # installers. Silently skip it - pass + continue elif ext == ".sql": # A plain old .sql file, just read and execute it logger.info("Applying schema %s", relative_path) executescript(cur, absolute_path) + elif ext == specific_engine_extension and root_name.endswith(".sql"): + # A .sql file specific to our engine; just read and execute it + logger.info("Applying engine-specific schema %s", relative_path) + executescript(cur, absolute_path) + elif ext in specific_engine_extensions and root_name.endswith(".sql"): + # A .sql file for a different engine; skip it. + continue else: # Not a valid delta file. - logger.warn( - "Found directory entry that did not end in .py or" " .sql: %s", + logger.warning( + "Found directory entry that did not end in .py or .sql: %s", relative_path, ) continue @@ -290,7 +306,7 @@ def _upgrade_existing_database( # Mark as done. cur.execute( database_engine.convert_param_style( - "INSERT INTO applied_schema_deltas (version, file)" " VALUES (?,?)" + "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)" ), (v, relative_path), ) @@ -298,7 +314,7 @@ def _upgrade_existing_database( cur.execute("DELETE FROM schema_version") cur.execute( database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded)" " VALUES (?,?)" + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" ), (v, True), ) -- cgit 1.4.1 From 62a1639287be270c8471a4de33804542b444bb8e Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 27 Aug 2019 09:36:12 +0100 Subject: Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/5911.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5911.misc diff --git a/changelog.d/5911.misc b/changelog.d/5911.misc new file mode 100644 index 0000000000..fe5a8fd59c --- /dev/null +++ b/changelog.d/5911.misc @@ -0,0 +1 @@ +Add support for database engine-specific schema deltas, based on file extension. 
\ No newline at end of file -- cgit 1.4.1 From d1e0b91083b9dd0dcbb9fa5819c8072c9e8625ef Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 27 Aug 2019 09:39:11 +0100 Subject: Code style (Black) Signed-off-by: Olivier Wilkinson (reivilibre) --- synapse/storage/prepare_database.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 0270cd6f6c..e96eed8a6d 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -243,9 +243,7 @@ def _upgrade_existing_database( else: specific_engine_extension = ".sqlite" - specific_engine_extensions = ( - ".sqlite", ".postgres" - ) + specific_engine_extensions = (".sqlite", ".postgres") -- cgit 1.4.1 From 1a7e6eb63387704ef379bf962318f710ce5ae5f3 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Tue, 27 Aug 2019 10:14:00 +0100 Subject: Add Admin API capability to set adminship of a user (#5878) Admin API: Set adminship of a user --- changelog.d/5878.feature | 1 + docs/admin_api/user_admin_api.rst | 20 +++++++++ synapse/handlers/admin.py | 10 ++++++ synapse/rest/admin/__init__.py | 2 ++ synapse/rest/admin/users.py | 76 +++++++++++++++++++++++++++++++++++++++ synapse/storage/registration.py | 23 ++++++++++++ 6 files changed, 132 insertions(+) create mode 100644 changelog.d/5878.feature create mode 100644 synapse/rest/admin/users.py diff --git a/changelog.d/5878.feature b/changelog.d/5878.feature new file mode 100644 index 0000000000..d9d6df880e --- /dev/null +++ b/changelog.d/5878.feature @@ -0,0 +1 @@ +Add admin API endpoint for setting whether or not a user is a server administrator. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 213359d0c0..6ee5080eed 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -84,3 +84,23 @@ with a body of: } including an ``access_token`` of a server admin. + + +Change whether a user is a server administrator or not +====================================================== + +Note that you cannot demote yourself. + +The api is:: + + PUT /_synapse/admin/v1/users/<user_id>/admin + +with a body of: + +.. code:: json + + { + "admin": true + } + +including an ``access_token`` of a server admin. diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 2f22f56ca4..d30a68b650 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -94,6 +94,16 @@ class AdminHandler(BaseHandler): return ret + def set_user_server_admin(self, user, admin): + """ + Set the admin bit on a user. + + Args: + user_id (UserID): the (necessarily local) user to manipulate + admin (bool): whether or not the user should be an admin of this server + """ + return self.store.set_server_admin(user, admin) + @defer.inlineCallbacks def export_user_data(self, user_id, writer): """Write all data we have on the user to the given writer.
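For illustration, a hypothetical client-side call against the endpoint documented above. The homeserver URL, user ID, and admin access token are placeholders, and the ``requests`` library stands in for any HTTP client:

.. code-block:: python

    import requests

    HOMESERVER = "https://homeserver.example.test"  # placeholder
    ADMIN_TOKEN = "MDAx..."  # access_token of a server admin (placeholder)

    resp = requests.put(
        HOMESERVER + "/_synapse/admin/v1/users/@alice:example.test/admin",
        params={"access_token": ADMIN_TOKEN},
        json={"admin": True},
    )
    resp.raise_for_status()  # a 200 with an empty JSON body indicates success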
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 0dce256840..9ab1c2c9e0 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -44,6 +44,7 @@ from synapse.rest.admin._base import ( from synapse.rest.admin.media import register_servlets_for_media_repo from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet +from synapse.rest.admin.users import UserAdminServlet from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string @@ -742,6 +743,7 @@ def register_servlets(hs, http_server): PurgeRoomServlet(hs).register(http_server) SendServerNoticeServlet(hs).register(http_server) VersionServlet(hs).register(http_server) + UserAdminServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py new file mode 100644 index 0000000000..b0fddb6898 --- /dev/null +++ b/synapse/rest/admin/users.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re + +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) +from synapse.rest.admin import assert_requester_is_admin +from synapse.types import UserID + + +class UserAdminServlet(RestServlet): + """ + Set whether or not a user is a server administrator. + + Note that only local users can be server administrators, and that an + administrator may not demote themselves. + + Only server administrators can use this API. 
+ + Example: + PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin + { + "admin": true + } + """ + + PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),) + + def __init__(self, hs): + self.hs = hs + self.auth = hs.get_auth() + self.handlers = hs.get_handlers() + + @defer.inlineCallbacks + def on_PUT(self, request, user_id): + yield assert_requester_is_admin(self.auth, request) + requester = yield self.auth.get_user_by_req(request) + auth_user = requester.user + + target_user = UserID.from_string(user_id) + + body = parse_json_object_from_request(request) + + assert_params_in_dict(body, ["admin"]) + + if not self.hs.is_mine(target_user): + raise SynapseError(400, "Only local users can be admins of this homeserver") + + set_admin_to = bool(body["admin"]) + + if target_user == auth_user and not set_admin_to: + raise SynapseError(400, "You may not demote yourself.") + + yield self.handlers.admin_handler.set_user_server_admin( + target_user, set_admin_to + ) + + return (200, {}) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 55e4e84d71..9027b917c1 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -272,6 +272,14 @@ class RegistrationWorkerStore(SQLBaseStore): @defer.inlineCallbacks def is_server_admin(self, user): + """Determines if a user is an admin of this homeserver. + + Args: + user (UserID): user ID of the user to test + + Returns (bool): + true iff the user is a server admin, false otherwise. + """ res = yield self._simple_select_one_onecol( table="users", keyvalues={"name": user.to_string()}, @@ -282,6 +290,21 @@ class RegistrationWorkerStore(SQLBaseStore): return res if res else False + def set_server_admin(self, user, admin): + """Sets whether a user is an admin of this homeserver. + + Args: + user (UserID): user ID of the user to test + admin (bool): true iff the user is to be a server admin, + false otherwise. + """ + return self._simple_update_one( + table="users", + keyvalues={"name": user.to_string()}, + updatevalues={"admin": 1 if admin else 0}, + desc="set_server_admin", + ) + def _query_for_auth(self, txn, token): sql = ( "SELECT users.name, users.is_guest, access_tokens.id as token_id," -- cgit 1.4.1 From e7577427c90a364601889ac983c760d825d9a530 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 27 Aug 2019 11:50:52 +0100 Subject: Update 5909.misc --- changelog.d/5909.misc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5909.misc b/changelog.d/5909.misc index 73e35cc48d..03d0c4367b 100644 --- a/changelog.d/5909.misc +++ b/changelog.d/5909.misc @@ -1 +1 @@ -Fix error message which referred to public_base_url instead of public_baseurl. +Fix error message which referred to public_base_url instead of public_baseurl. Thanks to @aaronraimist for the fix!
-- cgit 1.4.1 From c88a119259c8625b015db4cf8ea08e30ca16cc81 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 27 Aug 2019 13:12:27 +0100 Subject: Add GET method to admin API /users/@user:dom/admin Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/5914.feature | 1 + synapse/handlers/admin.py | 9 +++++++++ synapse/rest/admin/__init__.py | 2 +- synapse/rest/admin/users.py | 40 ++++++++++++++++++++++++++++++++-------- 4 files changed, 43 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5914.feature diff --git a/changelog.d/5914.feature b/changelog.d/5914.feature new file mode 100644 index 0000000000..85c7bf5963 --- /dev/null +++ b/changelog.d/5914.feature @@ -0,0 +1 @@ +Add admin API endpoint for getting whether or not a user is a server administrator. diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index d30a68b650..1a87b58838 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -94,6 +94,15 @@ class AdminHandler(BaseHandler): return ret + def get_user_server_admin(self, user): + """ + Get the admin bit on a user. + + Args: + user_id (UserID): the (necessarily local) user to manipulate + """ + return self.store.is_server_admin(user) + def set_user_server_admin(self, user, admin): + """ Set the admin bit on a user. diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 9ab1c2c9e0..fa91cc8dee 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) class UsersRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)") + PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$") def __init__(self, hs): self.hs = hs diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index b0fddb6898..5364117420 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -22,24 +22,34 @@ from synapse.http.servlet import ( assert_params_in_dict, parse_json_object_from_request, ) -from synapse.rest.admin import assert_requester_is_admin +from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin from synapse.types import UserID class UserAdminServlet(RestServlet): """ - Set whether or not a user is a server administrator. + Get or set whether or not a user is a server administrator. Note that only local users can be server administrators, and that an administrator may not demote themselves. Only server administrators can use this API.
-    Example:
-        PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin
-        {
-            "admin": true
-        }
+    Examples:
+        * Get
+            GET /_synapse/admin/v1/users/@nonadmin:example.com/admin
+            response on success:
+                {
+                    "admin": false
+                }
+        * Set
+            PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin
+            request body:
+                {
+                    "admin": true
+                }
+            response on success:
+                {}
     """
 
     PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),)
 
@@ -50,9 +60,23 @@
         self.handlers = hs.get_handlers()
 
     @defer.inlineCallbacks
-    def on_PUT(self, request, user_id):
+    def on_GET(self, request, user_id):
         yield assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(user_id)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Only local users can be admins of this homeserver")
+
+        is_admin = yield self.handlers.admin_handler.get_user_server_admin(target_user)
+        is_admin = bool(is_admin)
+
+        return (200, {"admin": is_admin})
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, user_id):
         requester = yield self.auth.get_user_by_req(request)
+        yield assert_user_is_admin(self.auth, requester.user)
         auth_user = requester.user
 
         target_user = UserID.from_string(user_id)
-- 
cgit 1.4.1


From 1b959b6977249c1db198beb6e04e02fa667ebfab Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Tue, 27 Aug 2019 13:19:19 +0100
Subject: Document GET method for retrieving admin bit of user in admin API

Signed-off-by: Olivier Wilkinson (reivilibre)
---
 docs/admin_api/user_admin_api.rst | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 6ee5080eed..d0871f9438 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -86,6 +86,25 @@ with a body of:
 including an ``access_token`` of a server admin.
 
 
+Get whether a user is a server administrator or not
+===================================================
+
+
+The api is::
+
+    GET /_synapse/admin/v1/users/<user_id>/admin
+
+including an ``access_token`` of a server admin.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+        "admin": true
+    }
+
+
 Change whether a user is a server administrator or not
 ======================================================
-- 
cgit 1.4.1


From 91caa5b4303bfa0b4604ecf95d56ae72a7074b0b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 27 Aug 2019 13:56:42 +0100
Subject: Fix off by one error in SRV result shuffling

---
 synapse/http/federation/srv_resolver.py | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index c8ca3fd0e9..3fe4ffb9e5 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -66,17 +66,18 @@ def _sort_server_list(server_list):
     for priority in sorted(priority_map):
         servers = priority_map[priority]
 
-        # This algorithms follows the algorithm described in RFC2782.
+        # This algorithm roughly follows the algorithm described in RFC2782,
+        # changed to remove an off-by-one error.
         #
-        # N.B. Weights can be zero, which means that you should pick that server
-        # last *or* that its the only server in this priority.
-
-        # We sort to ensure zero weighted items are first.
-        servers.sort(key=lambda s: s.weight)
+        # N.B. Weights can be zero, which means that they should be picked
+        # rarely.
         total_weight = sum(s.weight for s in servers)
-
-        while servers:
-            target_weight = random.randint(0, total_weight)
+
+        # Total weight can become zero if there are only zero weight servers
+        # left, which we handle by just shuffling and appending to the results.
+        while servers and total_weight:
+            target_weight = random.randint(1, total_weight)
 
             for s in servers:
                 target_weight -= s.weight
@@ -88,6 +89,10 @@
             servers.remove(s)
             total_weight -= s.weight
 
+        if servers:
+            random.shuffle(servers)
+            results.extend(servers)
+
     return results
-- 
cgit 1.4.1


From 1196ee32b35587af82e7bf60dc60f2ba56c5f93c Mon Sep 17 00:00:00 2001
From: Victor Goff
Date: Wed, 28 Aug 2019 04:34:49 -0400
Subject: Typographical corrections in docker/README (#5921)

---
 docker/README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docker/README.md b/docker/README.md
index 46bb9d2d99..d5879c2f2c 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -17,7 +17,7 @@ By default, the image expects a single volume, located at ``/data``, that will h
 * the appservices configuration.
 
 You are free to use separate volumes depending on storage endpoints at your
-disposal. For instance, ``/data/media`` coud be stored on a large but low
+disposal. For instance, ``/data/media`` could be stored on a large but low
 performance hdd storage while other files could be stored on high performance
 endpoints.
 
@@ -27,8 +27,8 @@ configuration file there. Multiple application services are supported.
 
 ## Generating a configuration file
 
-The first step is to genearte a valid config file. To do this, you can run the
-image with the `generate` commandline option.
+The first step is to generate a valid config file. To do this, you can run the
+image with the `generate` command line option.
 
 You will need to specify values for the `SYNAPSE_SERVER_NAME` and
 `SYNAPSE_REPORT_STATS` environment variable, and mount a docker volume to store
@@ -59,7 +59,7 @@ The following environment variables are supported in `generate` mode:
 * `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
   `/homeserver.yaml`.
 * `SYNAPSE_DATA_DIR`: where the generated config will put persistent data
-  such as the datatase and media store. Defaults to `/data`.
+  such as the database and media store. Defaults to `/data`.
 * `UID`, `GID`: the user id and group id to use for creating the data
   directories. Defaults to `991`, `991`.
 
@@ -115,7 +115,7 @@ not given).
 
 To migrate from a dynamic configuration file to a static one, run the docker
 container once with the environment variables set, and `migrate_config`
-commandline option. For example:
+command line option. For example:
 
 ```
 docker run -it --rm \
-- 
cgit 1.4.1


From 49ef8ec3995db4d14e1b1367c9cd96ea231072f4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 28 Aug 2019 10:18:16 +0100
Subject: Fix a cache-invalidation bug for worker-based deployments (#5920)

Some of the caches on worker processes were not being correctly invalidated
when a room's state was changed in a way that did not affect the membership
list of the room.

We need to make sure we send out cache invalidations even when no memberships
are changing.
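As a simplified illustration of the behaviour this fix aims for (a sketch, not Synapse's actual code: `send` stands in for the replication-stream write, and the key point is that an invalidation keyed on the room alone is still sent when no members changed):

```python
# Minimal sketch of the cache-invalidation behaviour described above.
def send_state_cache_invalidations(send, room_id, members_changed, batch_size=50):
    """Send current-state cache invalidations for a room over replication."""
    members = list(members_changed)
    if not members:
        # No membership changes: still invalidate the room-scoped caches.
        send([room_id])
        return
    # Chunk the members so no single replication line grows too long.
    for i in range(0, len(members), batch_size):
        send([room_id] + members[i : i + batch_size])
```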
---
 changelog.d/5920.bugfix  |  1 +
 synapse/storage/_base.py | 24 ++++++++++++++++--------
 2 files changed, 17 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/5920.bugfix

diff --git a/changelog.d/5920.bugfix b/changelog.d/5920.bugfix
new file mode 100644
index 0000000000..e45eb0ffee
--- /dev/null
+++ b/changelog.d/5920.bugfix
@@ -0,0 +1 @@
+Fix a cache-invalidation bug for worker-based deployments.

diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 489ce82fae..abe16334ec 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -1395,14 +1395,22 @@ class SQLBaseStore(object):
         """
         txn.call_after(self._invalidate_state_caches, room_id, members_changed)
 
-        # We need to be careful that the size of the `members_changed` list
-        # isn't so large that it causes problems sending over replication, so we
-        # send them in chunks.
-        # Max line length is 16K, and max user ID length is 255, so 50 should
-        # be safe.
-        for chunk in batch_iter(members_changed, 50):
-            keys = itertools.chain([room_id], chunk)
-            self._send_invalidation_to_replication(txn, _CURRENT_STATE_CACHE_NAME, keys)
+        if members_changed:
+            # We need to be careful that the size of the `members_changed` list
+            # isn't so large that it causes problems sending over replication, so we
+            # send them in chunks.
+            # Max line length is 16K, and max user ID length is 255, so 50 should
+            # be safe.
+            for chunk in batch_iter(members_changed, 50):
+                keys = itertools.chain([room_id], chunk)
+                self._send_invalidation_to_replication(
+                    txn, _CURRENT_STATE_CACHE_NAME, keys
+                )
+        else:
+            # if no members changed, we still need to invalidate the other caches.
+            self._send_invalidation_to_replication(
+                txn, _CURRENT_STATE_CACHE_NAME, [room_id]
+            )
 
     def _invalidate_state_caches(self, room_id, members_changed):
         """Invalidates caches that are based on the current state, but does
-- 
cgit 1.4.1


From 7dc398586c2156a456d9526ac0e42c1fec9f8143 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 28 Aug 2019 21:18:53 +1000
Subject: Implement a structured logging output system.
(#5680) --- .buildkite/docker-compose.py35.pg95.yaml | 1 + .buildkite/docker-compose.py37.pg11.yaml | 1 + .buildkite/docker-compose.py37.pg95.yaml | 1 + .buildkite/pipeline.yml | 10 +- .gitignore | 5 +- changelog.d/5680.misc | 1 + docs/structured_logging.md | 83 +++++++ synapse/app/_base.py | 12 +- synapse/app/admin_cmd.py | 4 +- synapse/app/appservice.py | 4 +- synapse/app/client_reader.py | 4 +- synapse/app/event_creator.py | 4 +- synapse/app/federation_reader.py | 4 +- synapse/app/federation_sender.py | 4 +- synapse/app/frontend_proxy.py | 4 +- synapse/app/homeserver.py | 4 +- synapse/app/media_repository.py | 4 +- synapse/app/pusher.py | 4 +- synapse/app/synchrotron.py | 4 +- synapse/app/user_dir.py | 4 +- synapse/config/logger.py | 103 +++++---- synapse/handlers/federation.py | 5 +- synapse/logging/_structured.py | 374 +++++++++++++++++++++++++++++++ synapse/logging/_terse_json.py | 278 +++++++++++++++++++++++ synapse/logging/context.py | 14 +- synapse/python_dependencies.py | 6 +- tests/logging/__init__.py | 0 tests/logging/test_structured.py | 197 ++++++++++++++++ tests/logging/test_terse_json.py | 234 +++++++++++++++++++ tests/server.py | 27 ++- tox.ini | 10 + 31 files changed, 1328 insertions(+), 82 deletions(-) create mode 100644 changelog.d/5680.misc create mode 100644 docs/structured_logging.md create mode 100644 synapse/logging/_structured.py create mode 100644 synapse/logging/_terse_json.py create mode 100644 tests/logging/__init__.py create mode 100644 tests/logging/test_structured.py create mode 100644 tests/logging/test_terse_json.py diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml index 2f14387fbc..aaea33006b 100644 --- a/.buildkite/docker-compose.py35.pg95.yaml +++ b/.buildkite/docker-compose.py35.pg95.yaml @@ -6,6 +6,7 @@ services: image: postgres:9.5 environment: POSTGRES_PASSWORD: postgres + command: -c fsync=off testenv: image: python:3.5 diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml index f3eec05ceb..1b32675e78 100644 --- a/.buildkite/docker-compose.py37.pg11.yaml +++ b/.buildkite/docker-compose.py37.pg11.yaml @@ -6,6 +6,7 @@ services: image: postgres:11 environment: POSTGRES_PASSWORD: postgres + command: -c fsync=off testenv: image: python:3.7 diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml index 2a41db8eba..7679f6508d 100644 --- a/.buildkite/docker-compose.py37.pg95.yaml +++ b/.buildkite/docker-compose.py37.pg95.yaml @@ -6,6 +6,7 @@ services: image: postgres:9.5 environment: POSTGRES_PASSWORD: postgres + command: -c fsync=off testenv: image: python:3.7 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b75269a155..d9327227ed 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -45,8 +45,15 @@ steps: - docker#v3.0.1: image: "python:3.6" - - wait + - command: + - "python -m pip install tox" + - "tox -e mypy" + label: ":mypy: mypy" + plugins: + - docker#v3.0.1: + image: "python:3.5" + - wait - command: - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev" @@ -55,6 +62,7 @@ steps: label: ":python: 3.5 / SQLite / Old Deps" env: TRIAL_FLAGS: "-j 2" + LANG: "C.UTF-8" plugins: - docker#v3.0.1: image: "ubuntu:xenial" # We use xenail to get an old sqlite and python diff --git a/.gitignore b/.gitignore index f6168a8819..e53d4908d5 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ _trial_temp*/ /*.signing.key /env/ 
/homeserver*.yaml +/logs /media_store/ /uploads @@ -29,8 +30,9 @@ _trial_temp*/ /.vscode/ # build products -/.coverage* !/.coveragerc +/.coverage* +/.mypy_cache/ /.tox /build/ /coverage.* @@ -38,4 +40,3 @@ _trial_temp*/ /docs/build/ /htmlcov /pip-wheel-metadata/ - diff --git a/changelog.d/5680.misc b/changelog.d/5680.misc new file mode 100644 index 0000000000..46a403a188 --- /dev/null +++ b/changelog.d/5680.misc @@ -0,0 +1 @@ +Lay the groundwork for structured logging output. diff --git a/docs/structured_logging.md b/docs/structured_logging.md new file mode 100644 index 0000000000..decec9b8fa --- /dev/null +++ b/docs/structured_logging.md @@ -0,0 +1,83 @@ +# Structured Logging + +A structured logging system can be useful when your logs are destined for a machine to parse and process. By maintaining its machine-readable characteristics, it enables more efficient searching and aggregations when consumed by software such as the "ELK stack". + +Synapse's structured logging system is configured via the file that Synapse's `log_config` config option points to. The file must be YAML and contain `structured: true`. It must contain a list of "drains" (places where logs go to). + +A structured logging configuration looks similar to the following: + +```yaml +structured: true + +loggers: + synapse: + level: INFO + synapse.storage.SQL: + level: WARNING + +drains: + console: + type: console + location: stdout + file: + type: file_json + location: homeserver.log +``` + +The above logging config will set Synapse as 'INFO' logging level by default, with the SQL layer at 'WARNING', and will have two logging drains (to the console and to a file, stored as JSON). + +## Drain Types + +Drain types can be specified by the `type` key. + +### `console` + +Outputs human-readable logs to the console. + +Arguments: + +- `location`: Either `stdout` or `stderr`. + +### `console_json` + +Outputs machine-readable JSON logs to the console. + +Arguments: + +- `location`: Either `stdout` or `stderr`. + +### `console_json_terse` + +Outputs machine-readable JSON logs to the console, separated by newlines. This +format is not designed to be read and re-formatted into human-readable text, but +is optimal for a logging aggregation system. + +Arguments: + +- `location`: Either `stdout` or `stderr`. + +### `file` + +Outputs human-readable logs to a file. + +Arguments: + +- `location`: An absolute path to the file to log to. + +### `file_json` + +Outputs machine-readable logs to a file. + +Arguments: + +- `location`: An absolute path to the file to log to. + +### `network_json_terse` + +Delivers machine-readable JSON logs to a log aggregator over TCP. This is +compatible with LogStash's TCP input with the codec set to `json_lines`. + +Arguments: + +- `host`: Hostname or IP address of the log aggregator. +- `port`: Numerical port to contact on the host. \ No newline at end of file diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 69dcf3523f..c30fdeee9a 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -36,18 +36,20 @@ from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) +# list of tuples of function, args list, kwargs dict _sighup_callbacks = [] -def register_sighup(func): +def register_sighup(func, *args, **kwargs): """ Register a function to be called when a SIGHUP occurs. Args: func (function): Function to be called when sent a SIGHUP signal. - Will be called with a single argument, the homeserver. 
+ Will be called with a single default argument, the homeserver. + *args, **kwargs: args and kwargs to be passed to the target function. """ - _sighup_callbacks.append(func) + _sighup_callbacks.append((func, args, kwargs)) def start_worker_reactor(appname, config, run_command=reactor.run): @@ -248,8 +250,8 @@ def start(hs, listeners=None): # we're not using systemd. sdnotify(b"RELOADING=1") - for i in _sighup_callbacks: - i(hs) + for i, args, kwargs in _sighup_callbacks: + i(hs, *args, **kwargs) sdnotify(b"READY=1") diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 1fd52a5526..04751a6a5e 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -227,8 +227,6 @@ def start(config_options): config.start_pushers = False config.send_federation = False - setup_logging(config, use_worker_options=True) - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -241,6 +239,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() # We use task.react as the basic run command as it correctly handles tearing diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 54bb114dec..767b87d2db 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -141,8 +141,6 @@ def start(config_options): assert config.worker_app == "synapse.app.appservice" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -167,6 +165,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ps, config, use_worker_options=True) + ps.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ps, config.worker_listeners diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 721bb5b119..86193d35a8 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -179,8 +179,6 @@ def start(config_options): assert config.worker_app == "synapse.app.client_reader" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -193,6 +191,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 473c8895d0..c67fe69a50 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -175,8 +175,6 @@ def start(config_options): assert config.worker_replication_http_port is not None - setup_logging(config, use_worker_options=True) - # This should only be done on the user directory worker or the master config.update_user_directory = False @@ -192,6 +190,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 5255d9e8cc..1ef027a88c 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -160,8 +160,6 @@ def start(config_options): assert config.worker_app == "synapse.app.federation_reader" - setup_logging(config, 
use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -174,6 +172,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index c5a2880e69..04fbb407af 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -171,8 +171,6 @@ def start(config_options): assert config.worker_app == "synapse.app.federation_sender" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -197,6 +195,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index e2822ca848..611d285421 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -232,8 +232,6 @@ def start(config_options): assert config.worker_main_http_uri is not None - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -246,6 +244,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8233905844..04f1ed14f3 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -341,8 +341,6 @@ def setup(config_options): # generating config files and shouldn't try to continue. 
sys.exit(0) - synapse.config.logger.setup_logging(config, use_worker_options=False) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -356,6 +354,8 @@ def setup(config_options): database_engine=database_engine, ) + synapse.config.logger.setup_logging(hs, config, use_worker_options=False) + logger.info("Preparing database: %s...", config.database_config["name"]) try: diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 3a168577c7..2ac783ffa3 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -155,8 +155,6 @@ def start(config_options): "Please add ``enable_media_repo: false`` to the main config\n" ) - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -169,6 +167,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 692ffa2f04..d84732ee3c 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -184,8 +184,6 @@ def start(config_options): assert config.worker_app == "synapse.app.pusher" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts if config.start_pushers: @@ -210,6 +208,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ps, config, use_worker_options=True) + ps.setup() def start(): diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index a1c3b162f7..473026fce5 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -435,8 +435,6 @@ def start(config_options): assert config.worker_app == "synapse.app.synchrotron" - setup_logging(config, use_worker_options=True) - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -450,6 +448,8 @@ def start(config_options): application_service_handler=SynchrotronApplicationService(), ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index cb29a1afab..e01afb39f2 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -197,8 +197,6 @@ def start(config_options): assert config.worker_app == "synapse.app.user_dir" - setup_logging(config, use_worker_options=True) - events.USE_FROZEN_DICTS = config.use_frozen_dicts database_engine = create_engine(config.database_config) @@ -223,6 +221,8 @@ def start(config_options): database_engine=database_engine, ) + setup_logging(ss, config, use_worker_options=True) + ss.setup() reactor.addSystemEventTrigger( "before", "startup", _base.start, ss, config.worker_listeners diff --git a/synapse/config/logger.py b/synapse/config/logger.py index d321d00b80..981df5a10c 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -25,6 +25,10 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner import synapse from synapse.app import _base as appbase +from synapse.logging._structured import ( + reload_structured_logging, + setup_structured_logging, +) from synapse.logging.context import LoggingContextFilter from synapse.util.versionstring import get_version_string @@ 
-119,21 +123,10 @@ class LoggingConfig(Config): log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file)) -def setup_logging(config, use_worker_options=False): - """ Set up python logging - - Args: - config (LoggingConfig | synapse.config.workers.WorkerConfig): - configuration data - - use_worker_options (bool): True to use the 'worker_log_config' option - instead of 'log_config'. - - register_sighup (func | None): Function to call to register a - sighup handler. +def _setup_stdlib_logging(config, log_config): + """ + Set up Python stdlib logging. """ - log_config = config.worker_log_config if use_worker_options else config.log_config - if log_config is None: log_format = ( "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" @@ -151,35 +144,10 @@ def setup_logging(config, use_worker_options=False): handler.addFilter(LoggingContextFilter(request="")) logger.addHandler(handler) else: + logging.config.dictConfig(log_config) - def load_log_config(): - with open(log_config, "r") as f: - logging.config.dictConfig(yaml.safe_load(f)) - - def sighup(*args): - # it might be better to use a file watcher or something for this. - load_log_config() - logging.info("Reloaded log config from %s due to SIGHUP", log_config) - - load_log_config() - appbase.register_sighup(sighup) - - # make sure that the first thing we log is a thing we can grep backwards - # for - logging.warn("***** STARTING SERVER *****") - logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse)) - logging.info("Server hostname: %s", config.server_name) - - # It's critical to point twisted's internal logging somewhere, otherwise it - # stacks up and leaks kup to 64K object; - # see: https://twistedmatrix.com/trac/ticket/8164 - # - # Routing to the python logging framework could be a performance problem if - # the handlers blocked for a long time as python.logging is a blocking API - # see https://twistedmatrix.com/documents/current/core/howto/logger.html - # filed as https://github.com/matrix-org/synapse/issues/1727 - # - # However this may not be too much of a problem if we are just writing to a file. + # Route Twisted's native logging through to the standard library logging + # system. observer = STDLibLogObserver() def _log(event): @@ -201,3 +169,54 @@ def setup_logging(config, use_worker_options=False): ) if not config.no_redirect_stdio: print("Redirected stdout/stderr to logs") + + +def _reload_stdlib_logging(*args, log_config=None): + logger = logging.getLogger("") + + if not log_config: + logger.warn("Reloaded a blank config?") + + logging.config.dictConfig(log_config) + + +def setup_logging(hs, config, use_worker_options=False): + """ + Set up the logging subsystem. + + Args: + config (LoggingConfig | synapse.config.workers.WorkerConfig): + configuration data + + use_worker_options (bool): True to use the 'worker_log_config' option + instead of 'log_config'. 
+ """ + log_config = config.worker_log_config if use_worker_options else config.log_config + + def read_config(*args, callback=None): + if log_config is None: + return None + + with open(log_config, "rb") as f: + log_config_body = yaml.safe_load(f.read()) + + if callback: + callback(log_config=log_config_body) + logging.info("Reloaded log config from %s due to SIGHUP", log_config) + + return log_config_body + + log_config_body = read_config() + + if log_config_body and log_config_body.get("structured") is True: + setup_structured_logging(hs, config, log_config_body) + appbase.register_sighup(read_config, callback=reload_structured_logging) + else: + _setup_stdlib_logging(config, log_config_body) + appbase.register_sighup(read_config, callback=_reload_stdlib_logging) + + # make sure that the first thing we log is a thing we can grep backwards + # for + logging.warn("***** STARTING SERVER *****") + logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse)) + logging.info("Server hostname: %s", config.server_name) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c86903b98b..94306c94a9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -326,8 +326,9 @@ class FederationHandler(BaseHandler): ours = yield self.store.get_state_groups_ids(room_id, seen) # state_maps is a list of mappings from (type, state_key) to event_id - # type: list[dict[tuple[str, str], str]] - state_maps = list(ours.values()) + state_maps = list( + ours.values() + ) # type: list[dict[tuple[str, str], str]] # we don't need this any more, let's delete it. del ours diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py new file mode 100644 index 0000000000..0367d6dfc4 --- /dev/null +++ b/synapse/logging/_structured.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os.path +import sys +import typing +import warnings + +import attr +from constantly import NamedConstant, Names, ValueConstant, Values +from zope.interface import implementer + +from twisted.logger import ( + FileLogObserver, + FilteringLogObserver, + ILogObserver, + LogBeginner, + Logger, + LogLevel, + LogLevelFilterPredicate, + LogPublisher, + eventAsText, + globalLogBeginner, + jsonFileLogObserver, +) + +from synapse.config._base import ConfigError +from synapse.logging._terse_json import ( + TerseJSONToConsoleLogObserver, + TerseJSONToTCPLogObserver, +) +from synapse.logging.context import LoggingContext + + +def stdlib_log_level_to_twisted(level: str) -> LogLevel: + """ + Convert a stdlib log level to Twisted's log level. + """ + lvl = level.lower().replace("warning", "warn") + return LogLevel.levelWithName(lvl) + + +@attr.s +@implementer(ILogObserver) +class LogContextObserver(object): + """ + An ILogObserver which adds Synapse-specific log context information. + + Attributes: + observer (ILogObserver): The target parent observer. 
+ """ + + observer = attr.ib() + + def __call__(self, event: dict) -> None: + """ + Consume a log event and emit it to the parent observer after filtering + and adding log context information. + + Args: + event (dict) + """ + # Filter out some useless events that Twisted outputs + if "log_text" in event: + if event["log_text"].startswith("DNSDatagramProtocol starting on "): + return + + if event["log_text"].startswith("(UDP Port "): + return + + if event["log_text"].startswith("Timing out client") or event[ + "log_format" + ].startswith("Timing out client"): + return + + context = LoggingContext.current_context() + + # Copy the context information to the log event. + if context is not None: + context.copy_to_twisted_log_entry(event) + else: + # If there's no logging context, not even the root one, we might be + # starting up or it might be from non-Synapse code. Log it as if it + # came from the root logger. + event["request"] = None + event["scope"] = None + + self.observer(event) + + +class PythonStdlibToTwistedLogger(logging.Handler): + """ + Transform a Python stdlib log message into a Twisted one. + """ + + def __init__(self, observer, *args, **kwargs): + """ + Args: + observer (ILogObserver): A Twisted logging observer. + *args, **kwargs: Args/kwargs to be passed to logging.Handler. + """ + self.observer = observer + super().__init__(*args, **kwargs) + + def emit(self, record: logging.LogRecord) -> None: + """ + Emit a record to Twisted's observer. + + Args: + record (logging.LogRecord) + """ + + self.observer( + { + "log_time": record.created, + "log_text": record.getMessage(), + "log_format": "{log_text}", + "log_namespace": record.name, + "log_level": stdlib_log_level_to_twisted(record.levelname), + } + ) + + +def SynapseFileLogObserver(outFile: typing.io.TextIO) -> FileLogObserver: + """ + A log observer that formats events like the traditional log formatter and + sends them to `outFile`. + + Args: + outFile (file object): The file object to write to. + """ + + def formatEvent(_event: dict) -> str: + event = dict(_event) + event["log_level"] = event["log_level"].name.upper() + event["log_format"] = "- {log_namespace} - {log_level} - {request} - " + ( + event.get("log_format", "{log_text}") or "{log_text}" + ) + return eventAsText(event, includeSystem=False) + "\n" + + return FileLogObserver(outFile, formatEvent) + + +class DrainType(Names): + CONSOLE = NamedConstant() + CONSOLE_JSON = NamedConstant() + CONSOLE_JSON_TERSE = NamedConstant() + FILE = NamedConstant() + FILE_JSON = NamedConstant() + NETWORK_JSON_TERSE = NamedConstant() + + +class OutputPipeType(Values): + stdout = ValueConstant(sys.__stdout__) + stderr = ValueConstant(sys.__stderr__) + + +@attr.s +class DrainConfiguration(object): + name = attr.ib() + type = attr.ib() + location = attr.ib() + options = attr.ib(default=None) + + +@attr.s +class NetworkJSONTerseOptions(object): + maximum_buffer = attr.ib(type=int) + + +DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}} + + +def parse_drain_configs( + drains: dict +) -> typing.Generator[DrainConfiguration, None, None]: + """ + Parse the drain configurations. + + Args: + drains (dict): A list of drain configurations. + + Yields: + DrainConfiguration instances. + + Raises: + ConfigError: If any of the drain configuration items are invalid. 
+ """ + for name, config in drains.items(): + if "type" not in config: + raise ConfigError("Logging drains require a 'type' key.") + + try: + logging_type = DrainType.lookupByName(config["type"].upper()) + except ValueError: + raise ConfigError( + "%s is not a known logging drain type." % (config["type"],) + ) + + if logging_type in [ + DrainType.CONSOLE, + DrainType.CONSOLE_JSON, + DrainType.CONSOLE_JSON_TERSE, + ]: + location = config.get("location") + if location is None or location not in ["stdout", "stderr"]: + raise ConfigError( + ( + "The %s drain needs the 'location' key set to " + "either 'stdout' or 'stderr'." + ) + % (logging_type,) + ) + + pipe = OutputPipeType.lookupByName(location).value + + yield DrainConfiguration(name=name, type=logging_type, location=pipe) + + elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]: + if "location" not in config: + raise ConfigError( + "The %s drain needs the 'location' key set." % (logging_type,) + ) + + location = config.get("location") + if os.path.abspath(location) != location: + raise ConfigError( + "File paths need to be absolute, '%s' is a relative path" + % (location,) + ) + yield DrainConfiguration(name=name, type=logging_type, location=location) + + elif logging_type in [DrainType.NETWORK_JSON_TERSE]: + host = config.get("host") + port = config.get("port") + maximum_buffer = config.get("maximum_buffer", 1000) + yield DrainConfiguration( + name=name, + type=logging_type, + location=(host, port), + options=NetworkJSONTerseOptions(maximum_buffer=maximum_buffer), + ) + + else: + raise ConfigError( + "The %s drain type is currently not implemented." + % (config["type"].upper(),) + ) + + +def setup_structured_logging( + hs, + config, + log_config: dict, + logBeginner: LogBeginner = globalLogBeginner, + redirect_stdlib_logging: bool = True, +) -> LogPublisher: + """ + Set up Twisted's structured logging system. + + Args: + hs: The homeserver to use. + config (HomeserverConfig): The configuration of the Synapse homeserver. + log_config (dict): The log configuration to use. + """ + if config.no_redirect_stdio: + raise ConfigError( + "no_redirect_stdio cannot be defined using structured logging." 
+ ) + + logger = Logger() + + if "drains" not in log_config: + raise ConfigError("The logging configuration requires a list of drains.") + + observers = [] + + for observer in parse_drain_configs(log_config["drains"]): + # Pipe drains + if observer.type == DrainType.CONSOLE: + logger.debug( + "Starting up the {name} console logger drain", name=observer.name + ) + observers.append(SynapseFileLogObserver(observer.location)) + elif observer.type == DrainType.CONSOLE_JSON: + logger.debug( + "Starting up the {name} JSON console logger drain", name=observer.name + ) + observers.append(jsonFileLogObserver(observer.location)) + elif observer.type == DrainType.CONSOLE_JSON_TERSE: + logger.debug( + "Starting up the {name} terse JSON console logger drain", + name=observer.name, + ) + observers.append( + TerseJSONToConsoleLogObserver(observer.location, metadata={}) + ) + + # File drains + elif observer.type == DrainType.FILE: + logger.debug("Starting up the {name} file logger drain", name=observer.name) + log_file = open(observer.location, "at", buffering=1, encoding="utf8") + observers.append(SynapseFileLogObserver(log_file)) + elif observer.type == DrainType.FILE_JSON: + logger.debug( + "Starting up the {name} JSON file logger drain", name=observer.name + ) + log_file = open(observer.location, "at", buffering=1, encoding="utf8") + observers.append(jsonFileLogObserver(log_file)) + + elif observer.type == DrainType.NETWORK_JSON_TERSE: + metadata = {"server_name": hs.config.server_name} + log_observer = TerseJSONToTCPLogObserver( + hs=hs, + host=observer.location[0], + port=observer.location[1], + metadata=metadata, + maximum_buffer=observer.options.maximum_buffer, + ) + log_observer.start() + observers.append(log_observer) + else: + # We should never get here, but, just in case, throw an error. + raise ConfigError("%s drain type cannot be configured" % (observer.type,)) + + publisher = LogPublisher(*observers) + log_filter = LogLevelFilterPredicate() + + for namespace, namespace_config in log_config.get( + "loggers", DEFAULT_LOGGERS + ).items(): + # Set the log level for twisted.logger.Logger namespaces + log_filter.setLogLevelForNamespace( + namespace, + stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")), + ) + + # Also set the log levels for the stdlib logger namespaces, to prevent + # them getting to PythonStdlibToTwistedLogger and having to be formatted + if "level" in namespace_config: + logging.getLogger(namespace).setLevel(namespace_config.get("level")) + + f = FilteringLogObserver(publisher, [log_filter]) + lco = LogContextObserver(f) + + if redirect_stdlib_logging: + stuff_into_twisted = PythonStdlibToTwistedLogger(lco) + stdliblogger = logging.getLogger() + stdliblogger.addHandler(stuff_into_twisted) + + # Always redirect standard I/O, otherwise other logging outputs might miss + # it. + logBeginner.beginLoggingTo([lco], redirectStandardIO=True) + + return publisher + + +def reload_structured_logging(*args, log_config=None) -> None: + warnings.warn( + "Currently the structured logging system can not be reloaded, doing nothing" + ) diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py new file mode 100644 index 0000000000..7f1e8f23fe --- /dev/null +++ b/synapse/logging/_terse_json.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Log formatters that output terse JSON. +""" + +import sys +from collections import deque +from ipaddress import IPv4Address, IPv6Address, ip_address +from math import floor +from typing.io import TextIO + +import attr +from simplejson import dumps + +from twisted.application.internet import ClientService +from twisted.internet.endpoints import ( + HostnameEndpoint, + TCP4ClientEndpoint, + TCP6ClientEndpoint, +) +from twisted.internet.protocol import Factory, Protocol +from twisted.logger import FileLogObserver, Logger +from twisted.python.failure import Failure + + +def flatten_event(event: dict, metadata: dict, include_time: bool = False): + """ + Flatten a Twisted logging event to an dictionary capable of being sent + as a log event to a logging aggregation system. + + The format is vastly simplified and is not designed to be a "human readable + string" in the sense that traditional logs are. Instead, the structure is + optimised for searchability and filtering, with human-understandable log + keys. + + Args: + event (dict): The Twisted logging event we are flattening. + metadata (dict): Additional data to include with each log message. This + can be information like the server name. Since the target log + consumer does not know who we are other than by host IP, this + allows us to forward through static information. + include_time (bool): Should we include the `time` key? If False, the + event time is stripped from the event. + """ + new_event = {} + + # If it's a failure, make the new event's log_failure be the traceback text. + if "log_failure" in event: + new_event["log_failure"] = event["log_failure"].getTraceback() + + # If it's a warning, copy over a string representation of the warning. + if "warning" in event: + new_event["warning"] = str(event["warning"]) + + # Stdlib logging events have "log_text" as their human-readable portion, + # Twisted ones have "log_format". For now, include the log_format, so that + # context only given in the log format (e.g. what is being logged) is + # available. + if "log_text" in event: + new_event["log"] = event["log_text"] + else: + new_event["log"] = event["log_format"] + + # We want to include the timestamp when forwarding over the network, but + # exclude it when we are writing to stdout. This is because the log ingester + # (e.g. logstash, fluentd) can add its own timestamp. + if include_time: + new_event["time"] = round(event["log_time"], 2) + + # Convert the log level to a textual representation. + new_event["level"] = event["log_level"].name.upper() + + # Ignore these keys, and do not transfer them over to the new log object. + # They are either useless (isError), transferred manually above (log_time, + # log_level, etc), or contain Python objects which are not useful for output + # (log_logger, log_source). + keys_to_delete = [ + "isError", + "log_failure", + "log_format", + "log_level", + "log_logger", + "log_source", + "log_system", + "log_time", + "log_text", + "observer", + "warning", + ] + + # If it's from the Twisted legacy logger (twisted.python.log), it adds some + # more keys we want to purge. 
+ if event.get("log_namespace") == "log_legacy": + keys_to_delete.extend(["message", "system", "time"]) + + # Rather than modify the dictionary in place, construct a new one with only + # the content we want. The original event should be considered 'frozen'. + for key in event.keys(): + + if key in keys_to_delete: + continue + + if isinstance(event[key], (str, int, bool, float)) or event[key] is None: + # If it's a plain type, include it as is. + new_event[key] = event[key] + else: + # If it's not one of those basic types, write out a string + # representation. This should probably be a warning in development, + # so that we are sure we are only outputting useful data. + new_event[key] = str(event[key]) + + # Add the metadata information to the event (e.g. the server_name). + new_event.update(metadata) + + return new_event + + +def TerseJSONToConsoleLogObserver(outFile: TextIO, metadata: dict) -> FileLogObserver: + """ + A log observer that formats events to a flattened JSON representation. + + Args: + outFile: The file object to write to. + metadata: Metadata to be added to each log object. + """ + + def formatEvent(_event: dict) -> str: + flattened = flatten_event(_event, metadata) + return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n" + + return FileLogObserver(outFile, formatEvent) + + +@attr.s +class TerseJSONToTCPLogObserver(object): + """ + An IObserver that writes JSON logs to a TCP target. + + Args: + hs (HomeServer): The Homeserver that is being logged for. + host: The host of the logging target. + port: The logging target's port. + metadata: Metadata to be added to each log entry. + """ + + hs = attr.ib() + host = attr.ib(type=str) + port = attr.ib(type=int) + metadata = attr.ib(type=dict) + maximum_buffer = attr.ib(type=int) + _buffer = attr.ib(default=attr.Factory(deque), type=deque) + _writer = attr.ib(default=None) + _logger = attr.ib(default=attr.Factory(Logger)) + + def start(self) -> None: + + # Connect without DNS lookups if it's a direct IP. + try: + ip = ip_address(self.host) + if isinstance(ip, IPv4Address): + endpoint = TCP4ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + elif isinstance(ip, IPv6Address): + endpoint = TCP6ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + except ValueError: + endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) + + factory = Factory.forProtocol(Protocol) + self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) + self._service.startService() + + def _write_loop(self) -> None: + """ + Implement the write loop. + """ + if self._writer: + return + + self._writer = self._service.whenConnected() + + @self._writer.addBoth + def writer(r): + if isinstance(r, Failure): + r.printTraceback(file=sys.__stderr__) + self._writer = None + self.hs.get_reactor().callLater(1, self._write_loop) + return + + try: + for event in self._buffer: + r.transport.write( + dumps(event, ensure_ascii=False, separators=(",", ":")).encode( + "utf8" + ) + ) + r.transport.write(b"\n") + self._buffer.clear() + except Exception as e: + sys.__stderr__.write("Failed writing out logs with %s\n" % (str(e),)) + + self._writer = False + self.hs.get_reactor().callLater(1, self._write_loop) + + def _handle_pressure(self) -> None: + """ + Handle backpressure by shedding events. + + The buffer will, in this order, until the buffer is below the maximum: + - Shed DEBUG events + - Shed INFO events + - Shed the middle 50% of the events. 
+ """ + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out DEBUGs + self._buffer = deque( + filter(lambda event: event["level"] != "DEBUG", self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out INFOs + self._buffer = deque( + filter(lambda event: event["level"] != "INFO", self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Cut the middle entries out + buffer_split = floor(self.maximum_buffer / 2) + + old_buffer = self._buffer + self._buffer = deque() + + for i in range(buffer_split): + self._buffer.append(old_buffer.popleft()) + + end_buffer = [] + for i in range(buffer_split): + end_buffer.append(old_buffer.pop()) + + self._buffer.extend(reversed(end_buffer)) + + def __call__(self, event: dict) -> None: + flattened = flatten_event(event, self.metadata, include_time=True) + self._buffer.append(flattened) + + # Handle backpressure, if it exists. + try: + self._handle_pressure() + except Exception: + # If handling backpressure fails,clear the buffer and log the + # exception. + self._buffer.clear() + self._logger.failure("Failed clearing backpressure") + + # Try and write immediately. + self._write_loop() diff --git a/synapse/logging/context.py b/synapse/logging/context.py index b456c31f70..63379bfb93 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -25,6 +25,7 @@ See doc/log_contexts.rst for details on how this works. import logging import threading import types +from typing import Any, List from twisted.internet import defer, threads @@ -194,7 +195,7 @@ class LoggingContext(object): class Sentinel(object): """Sentinel to represent the root context""" - __slots__ = [] + __slots__ = [] # type: List[Any] def __str__(self): return "sentinel" @@ -202,6 +203,10 @@ class LoggingContext(object): def copy_to(self, record): pass + def copy_to_twisted_log_entry(self, record): + record["request"] = None + record["scope"] = None + def start(self): pass @@ -330,6 +335,13 @@ class LoggingContext(object): # we also track the current scope: record.scope = self.scope + def copy_to_twisted_log_entry(self, record): + """ + Copy logging fields from this context to a Twisted log record. + """ + record["request"] = self.request + record["scope"] = self.scope + def start(self): if get_thread_id() != self.main_thread: logger.warning("Started logcontext %s on different thread", self) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c6465c0386..ec0ac547c1 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -47,9 +47,9 @@ REQUIREMENTS = [ "idna>=2.5", # validating SSL certs for IP addresses requires service_identity 18.1. "service_identity>=18.1.0", - # our logcontext handling relies on the ability to cancel inlineCallbacks - # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7. - "Twisted>=18.7.0", + # Twisted 18.9 introduces some logger improvements that the structured + # logger utilises + "Twisted>=18.9.0", "treq>=15.1", # Twisted has required pyopenssl 16.0 since about Twisted 16.6. "pyopenssl>=16.0.0", diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/logging/test_structured.py b/tests/logging/test_structured.py new file mode 100644 index 0000000000..a786de0233 --- /dev/null +++ b/tests/logging/test_structured.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import os.path +import shutil +import sys +import textwrap + +from twisted.logger import Logger, eventAsText, eventsFromJSONLogFile + +from synapse.config.logger import setup_logging +from synapse.logging._structured import setup_structured_logging +from synapse.logging.context import LoggingContext + +from tests.unittest import DEBUG, HomeserverTestCase + + +class FakeBeginner(object): + def beginLoggingTo(self, observers, **kwargs): + self.observers = observers + + +class StructuredLoggingTestCase(HomeserverTestCase): + """ + Tests for Synapse's structured logging support. + """ + + def test_output_to_json_round_trip(self): + """ + Synapse logs can be outputted to JSON and then read back again. + """ + temp_dir = self.mktemp() + os.mkdir(temp_dir) + self.addCleanup(shutil.rmtree, temp_dir) + + json_log_file = os.path.abspath(os.path.join(temp_dir, "out.json")) + + log_config = { + "drains": {"jsonfile": {"type": "file_json", "location": json_log_file}} + } + + # Begin the logger with our config + beginner = FakeBeginner() + setup_structured_logging( + self.hs, self.hs.config, log_config, logBeginner=beginner + ) + + # Make a logger and send an event + logger = Logger( + namespace="tests.logging.test_structured", observer=beginner.observers[0] + ) + logger.info("Hello there, {name}!", name="wally") + + # Read the log file and check it has the event we sent + with open(json_log_file, "r") as f: + logged_events = list(eventsFromJSONLogFile(f)) + self.assertEqual(len(logged_events), 1) + + # The event pulled from the file should render fine + self.assertEqual( + eventAsText(logged_events[0], includeTimestamp=False), + "[tests.logging.test_structured#info] Hello there, wally!", + ) + + def test_output_to_text(self): + """ + Synapse logs can be outputted to text. + """ + temp_dir = self.mktemp() + os.mkdir(temp_dir) + self.addCleanup(shutil.rmtree, temp_dir) + + log_file = os.path.abspath(os.path.join(temp_dir, "out.log")) + + log_config = {"drains": {"file": {"type": "file", "location": log_file}}} + + # Begin the logger with our config + beginner = FakeBeginner() + setup_structured_logging( + self.hs, self.hs.config, log_config, logBeginner=beginner + ) + + # Make a logger and send an event + logger = Logger( + namespace="tests.logging.test_structured", observer=beginner.observers[0] + ) + logger.info("Hello there, {name}!", name="wally") + + # Read the log file and check it has the event we sent + with open(log_file, "r") as f: + logged_events = f.read().strip().split("\n") + self.assertEqual(len(logged_events), 1) + + # The event pulled from the file should render fine + self.assertTrue( + logged_events[0].endswith( + " - tests.logging.test_structured - INFO - None - Hello there, wally!" + ) + ) + + def test_collects_logcontext(self): + """ + Test that log outputs have the attached logging context. 
+ """ + log_config = {"drains": {}} + + # Begin the logger with our config + beginner = FakeBeginner() + publisher = setup_structured_logging( + self.hs, self.hs.config, log_config, logBeginner=beginner + ) + + logs = [] + + publisher.addObserver(logs.append) + + # Make a logger and send an event + logger = Logger( + namespace="tests.logging.test_structured", observer=beginner.observers[0] + ) + + with LoggingContext("testcontext", request="somereq"): + logger.info("Hello there, {name}!", name="steve") + + self.assertEqual(len(logs), 1) + self.assertEqual(logs[0]["request"], "somereq") + + +class StructuredLoggingConfigurationFileTestCase(HomeserverTestCase): + def make_homeserver(self, reactor, clock): + + tempdir = self.mktemp() + os.mkdir(tempdir) + log_config_file = os.path.abspath(os.path.join(tempdir, "log.config.yaml")) + self.homeserver_log = os.path.abspath(os.path.join(tempdir, "homeserver.log")) + + config = self.default_config() + config["log_config"] = log_config_file + + with open(log_config_file, "w") as f: + f.write( + textwrap.dedent( + """\ + structured: true + + drains: + file: + type: file_json + location: %s + """ + % (self.homeserver_log,) + ) + ) + + self.addCleanup(self._sys_cleanup) + + return self.setup_test_homeserver(config=config) + + def _sys_cleanup(self): + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + + # Do not remove! We need the logging system to be set other than WARNING. + @DEBUG + def test_log_output(self): + """ + When a structured logging config is given, Synapse will use it. + """ + setup_logging(self.hs, self.hs.config) + + # Make a logger and send an event + logger = Logger(namespace="tests.logging.test_structured") + + with LoggingContext("testcontext", request="somereq"): + logger.info("Hello there, {name}!", name="steve") + + with open(self.homeserver_log, "r") as f: + logged_events = [ + eventAsText(x, includeTimestamp=False) for x in eventsFromJSONLogFile(f) + ] + + logs = "\n".join(logged_events) + self.assertTrue("***** STARTING SERVER *****" in logs) + self.assertTrue("Hello there, steve!" in logs) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py new file mode 100644 index 0000000000..514282591d --- /dev/null +++ b/tests/logging/test_terse_json.py @@ -0,0 +1,234 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from collections import Counter + +from twisted.logger import Logger + +from synapse.logging._structured import setup_structured_logging + +from tests.server import connect_client +from tests.unittest import HomeserverTestCase + +from .test_structured import FakeBeginner + + +class TerseJSONTCPTestCase(HomeserverTestCase): + def test_log_output(self): + """ + The Terse JSON outputter delivers simplified structured logs over TCP. 
+ """ + log_config = { + "drains": { + "tersejson": { + "type": "network_json_terse", + "host": "127.0.0.1", + "port": 8000, + } + } + } + + # Begin the logger with our config + beginner = FakeBeginner() + setup_structured_logging( + self.hs, self.hs.config, log_config, logBeginner=beginner + ) + + logger = Logger( + namespace="tests.logging.test_terse_json", observer=beginner.observers[0] + ) + logger.info("Hello there, {name}!", name="wally") + + # Trigger the connection + self.pump() + + _, server = connect_client(self.reactor, 0) + + # Trigger data being sent + self.pump() + + # One log message, with a single trailing newline + logs = server.data.decode("utf8").splitlines() + self.assertEqual(len(logs), 1) + self.assertEqual(server.data.count(b"\n"), 1) + + log = json.loads(logs[0]) + + # The terse logger should give us these keys. + expected_log_keys = [ + "log", + "time", + "level", + "log_namespace", + "request", + "scope", + "server_name", + "name", + ] + self.assertEqual(set(log.keys()), set(expected_log_keys)) + + # It contains the data we expect. + self.assertEqual(log["name"], "wally") + + def test_log_backpressure_debug(self): + """ + When backpressure is hit, DEBUG logs will be shed. + """ + log_config = { + "loggers": {"synapse": {"level": "DEBUG"}}, + "drains": { + "tersejson": { + "type": "network_json_terse", + "host": "127.0.0.1", + "port": 8000, + "maximum_buffer": 10, + } + }, + } + + # Begin the logger with our config + beginner = FakeBeginner() + setup_structured_logging( + self.hs, + self.hs.config, + log_config, + logBeginner=beginner, + redirect_stdlib_logging=False, + ) + + logger = Logger( + namespace="synapse.logging.test_terse_json", observer=beginner.observers[0] + ) + + # Send some debug messages + for i in range(0, 3): + logger.debug("debug %s" % (i,)) + + # Send a bunch of useful messages + for i in range(0, 7): + logger.info("test message %s" % (i,)) + + # The last debug message pushes it past the maximum buffer + logger.debug("too much debug") + + # Allow the reconnection + _, server = connect_client(self.reactor, 0) + self.pump() + + # Only the 7 infos made it through, the debugs were elided + logs = server.data.splitlines() + self.assertEqual(len(logs), 7) + + def test_log_backpressure_info(self): + """ + When backpressure is hit, DEBUG and INFO logs will be shed. 
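+
+        With a maximum_buffer of 10, only the ten WARN-level events should
+        survive; the lower-severity DEBUG and INFO events are dropped first.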
+        """
+        log_config = {
+            "loggers": {"synapse": {"level": "DEBUG"}},
+            "drains": {
+                "tersejson": {
+                    "type": "network_json_terse",
+                    "host": "127.0.0.1",
+                    "port": 8000,
+                    "maximum_buffer": 10,
+                }
+            },
+        }
+
+        # Begin the logger with our config
+        beginner = FakeBeginner()
+        setup_structured_logging(
+            self.hs,
+            self.hs.config,
+            log_config,
+            logBeginner=beginner,
+            redirect_stdlib_logging=False,
+        )
+
+        logger = Logger(
+            namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+        )
+
+        # Send some debug messages
+        for i in range(0, 3):
+            logger.debug("debug %s" % (i,))
+
+        # Send a bunch of useful messages
+        for i in range(0, 10):
+            logger.warn("test warn %s" % (i,))
+
+        # Send a bunch of info messages
+        for i in range(0, 3):
+            logger.info("test message %s" % (i,))
+
+        # The last debug message pushes it past the maximum buffer
+        logger.debug("too much debug")
+
+        # Allow the reconnection
+        client, server = connect_client(self.reactor, 0)
+        self.pump()
+
+        # The 10 warnings made it through, the debugs and infos were elided
+        logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
+        self.assertEqual(len(logs), 10)
+
+        self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+
+    def test_log_backpressure_cut_middle(self):
+        """
+        When backpressure is hit and no more DEBUG or INFO messages can be
+        culled, the middle messages are cut out.
+        """
+        log_config = {
+            "loggers": {"synapse": {"level": "DEBUG"}},
+            "drains": {
+                "tersejson": {
+                    "type": "network_json_terse",
+                    "host": "127.0.0.1",
+                    "port": 8000,
+                    "maximum_buffer": 10,
+                }
+            },
+        }
+
+        # Begin the logger with our config
+        beginner = FakeBeginner()
+        setup_structured_logging(
+            self.hs,
+            self.hs.config,
+            log_config,
+            logBeginner=beginner,
+            redirect_stdlib_logging=False,
+        )
+
+        logger = Logger(
+            namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+        )
+
+        # Send a bunch of useful messages
+        for i in range(0, 20):
+            logger.warn("test warn", num=i)
+
+        # Allow the reconnection
+        client, server = connect_client(self.reactor, 0)
+        self.pump()
+
+        # The first five and last five warnings made it through, the debugs and
+        # infos were elided
+        logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
+        self.assertEqual(len(logs), 10)
+        self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+        self.assertEqual([0, 1, 2, 3, 4, 15, 16, 17, 18, 19], [x["num"] for x in logs])
diff --git a/tests/server.py b/tests/server.py
index e573c4e4c5..c8269619b1 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -11,9 +11,13 @@ from twisted.internet import address, threads, udp
 from twisted.internet._resolver import SimpleResolverComplexifier
 from twisted.internet.defer import Deferred, fail, succeed
 from twisted.internet.error import DNSLookupError
-from twisted.internet.interfaces import IReactorPluggableNameResolver, IResolverSimple
+from twisted.internet.interfaces import (
+    IReactorPluggableNameResolver,
+    IReactorTCP,
+    IResolverSimple,
+)
 from twisted.python.failure import Failure
-from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
 from twisted.web.http import unquote
 from twisted.web.http_headers import Headers
@@ -465,3 +469,22 @@ class FakeTransport(object):
         self.buffer = self.buffer[len(to_write) :]
         if self.buffer and self.autoflush:
             self._reactor.callLater(0.0, self.flush)
+
+
+def connect_client(reactor: IReactorTCP, client_id: int) -> 
AccumulatingProtocol: + """ + Connect a client to a fake TCP transport. + + Args: + reactor + factory: The connecting factory to build. + """ + factory = reactor.tcpClients[client_id][2] + client = factory.buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, reactor)) + client.makeConnection(FakeTransport(server, reactor)) + + reactor.tcpClients.pop(client_id) + + return client, server diff --git a/tox.ini b/tox.ini index 09b4b8fc3c..f9a3b7e49a 100644 --- a/tox.ini +++ b/tox.ini @@ -146,3 +146,13 @@ commands = coverage combine coverage xml codecov -X gcov + +[testenv:mypy] +basepython = python3.5 +deps = + {[base]deps} + mypy +extras = all +commands = mypy --ignore-missing-imports \ + synapse/logging/_structured.py \ + synapse/logging/_terse_json.py \ No newline at end of file -- cgit 1.4.1 From 6d97843793d59bc5d307475a6a6185ff107e116b Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Wed, 28 Aug 2019 13:12:22 +0100 Subject: Config templating (#5900) Template config files * Imagine a system composed entirely of x, y, z etc and the basic operations.. Wait George, why XOR? Why not just neq? George: Eh, I didn't think of that.. Co-Authored-By: Erik Johnston --- changelog.d/5900.feature | 1 + docs/sample_config.yaml | 16 +++---- synapse/config/_base.py | 37 ++++++++++++++++ synapse/config/database.py | 27 +++++++---- synapse/config/server.py | 84 ++++++++++++++++++++++++++++------- synapse/config/tls.py | 50 ++++++++++++++++----- tests/config/test_database.py | 52 ++++++++++++++++++++++ tests/config/test_server.py | 101 +++++++++++++++++++++++++++++++++++++++++- tests/config/test_tls.py | 44 ++++++++++++++++++ 9 files changed, 366 insertions(+), 46 deletions(-) create mode 100644 changelog.d/5900.feature create mode 100644 tests/config/test_database.py diff --git a/changelog.d/5900.feature b/changelog.d/5900.feature new file mode 100644 index 0000000000..b62d88a76b --- /dev/null +++ b/changelog.d/5900.feature @@ -0,0 +1 @@ +Add support for config templating. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ae1cafc5f3..6da1167632 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -205,9 +205,9 @@ listeners: # - port: 8008 tls: false - bind_addresses: ['::1', '127.0.0.1'] type: http x_forwarded: true + bind_addresses: ['::1', '127.0.0.1'] resources: - names: [client, federation] @@ -392,10 +392,10 @@ listeners: # permission to listen on port 80. # acme: - # ACME support is disabled by default. Uncomment the following line - # (and tls_certificate_path and tls_private_key_path above) to enable it. + # ACME support is disabled by default. Set this to `true` and uncomment + # tls_certificate_path and tls_private_key_path above to enable it. # - #enabled: true + enabled: False # Endpoint to use to request certificates. If you only want to test, # use Let's Encrypt's staging url: @@ -406,17 +406,17 @@ acme: # Port number to listen on for the HTTP-01 challenge. Change this if # you are forwarding connections through Apache/Nginx/etc. # - #port: 80 + port: 80 # Local addresses to listen on for incoming connections. # Again, you may want to change this if you are forwarding connections # through Apache/Nginx/etc. # - #bind_addresses: ['::', '0.0.0.0'] + bind_addresses: ['::', '0.0.0.0'] # How many days remaining on a certificate before it is renewed. # - #reprovision_threshold: 30 + reprovision_threshold: 30 # The domain that the certificate should be for. 
Normally this # should be the same as your Matrix domain (i.e., 'server_name'), but, @@ -430,7 +430,7 @@ acme: # # If not set, defaults to your 'server_name'. # - #domain: matrix.example.com + domain: matrix.example.com # file to use for the account key. This will be generated if it doesn't # exist. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 6ce5cd07fb..31f6530978 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -181,6 +181,11 @@ class Config(object): generate_secrets=False, report_stats=None, open_private_ports=False, + listeners=None, + database_conf=None, + tls_certificate_path=None, + tls_private_key_path=None, + acme_domain=None, ): """Build a default configuration file @@ -207,6 +212,33 @@ class Config(object): open_private_ports (bool): True to leave private ports (such as the non-TLS HTTP listener) open to the internet. + listeners (list(dict)|None): A list of descriptions of the listeners + synapse should start with each of which specifies a port (str), a list of + resources (list(str)), tls (bool) and type (str). For example: + [{ + "port": 8448, + "resources": [{"names": ["federation"]}], + "tls": True, + "type": "http", + }, + { + "port": 443, + "resources": [{"names": ["client"]}], + "tls": False, + "type": "http", + }], + + + database (str|None): The database type to configure, either `psycog2` + or `sqlite3`. + + tls_certificate_path (str|None): The path to the tls certificate. + + tls_private_key_path (str|None): The path to the tls private key. + + acme_domain (str|None): The domain acme will try to validate. If + specified acme will be enabled. + Returns: str: the yaml config file """ @@ -220,6 +252,11 @@ class Config(object): generate_secrets=generate_secrets, report_stats=report_stats, open_private_ports=open_private_ports, + listeners=listeners, + database_conf=database_conf, + tls_certificate_path=tls_certificate_path, + tls_private_key_path=tls_private_key_path, + acme_domain=acme_domain, ) ) diff --git a/synapse/config/database.py b/synapse/config/database.py index 746a6cd1f4..118aafbd4a 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import os +from textwrap import indent + +import yaml from ._base import Config @@ -38,20 +41,28 @@ class DatabaseConfig(Config): self.set_databasepath(config.get("database_path")) - def generate_config_section(self, data_dir_path, **kwargs): - database_path = os.path.join(data_dir_path, "homeserver.db") - return ( - """\ - ## Database ## - - database: - # The database engine name + def generate_config_section(self, data_dir_path, database_conf, **kwargs): + if not database_conf: + database_path = os.path.join(data_dir_path, "homeserver.db") + database_conf = ( + """# The database engine name name: "sqlite3" # Arguments to pass to the engine args: # Path to the database database: "%(database_path)s" + """ + % locals() + ) + else: + database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip() + + return ( + """\ + ## Database ## + database: + %(database_conf)s # Number of events to cache in memory. 
# #event_cache_size: 10K diff --git a/synapse/config/server.py b/synapse/config/server.py index 15449695d1..2abdef0971 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -17,8 +17,11 @@ import logging import os.path +import re +from textwrap import indent import attr +import yaml from netaddr import IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -352,7 +355,7 @@ class ServerConfig(Config): return any(l["tls"] for l in self.listeners) def generate_config_section( - self, server_name, data_dir_path, open_private_ports, **kwargs + self, server_name, data_dir_path, open_private_ports, listeners, **kwargs ): _, bind_port = parse_and_validate_server_name(server_name) if bind_port is not None: @@ -366,11 +369,68 @@ class ServerConfig(Config): # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the # default config string default_room_version = DEFAULT_ROOM_VERSION + secure_listeners = [] + unsecure_listeners = [] + private_addresses = ["::1", "127.0.0.1"] + if listeners: + for listener in listeners: + if listener["tls"]: + secure_listeners.append(listener) + else: + # If we don't want open ports we need to bind the listeners + # to some address other than 0.0.0.0. Here we chose to use + # localhost. + # If the addresses are already bound we won't overwrite them + # however. + if not open_private_ports: + listener.setdefault("bind_addresses", private_addresses) + + unsecure_listeners.append(listener) + + secure_http_bindings = indent( + yaml.dump(secure_listeners), " " * 10 + ).lstrip() + + unsecure_http_bindings = indent( + yaml.dump(unsecure_listeners), " " * 10 + ).lstrip() + + if not unsecure_listeners: + unsecure_http_bindings = ( + """- port: %(unsecure_port)s + tls: false + type: http + x_forwarded: true""" + % locals() + ) + + if not open_private_ports: + unsecure_http_bindings += ( + "\n bind_addresses: ['::1', '127.0.0.1']" + ) + + unsecure_http_bindings += """ + + resources: + - names: [client, federation] + compress: false""" + + if listeners: + # comment out this block + unsecure_http_bindings = "#" + re.sub( + "\n {10}", + lambda match: match.group(0) + "#", + unsecure_http_bindings, + ) - unsecure_http_binding = "port: %i\n tls: false" % (unsecure_port,) - if not open_private_ports: - unsecure_http_binding += ( - "\n bind_addresses: ['::1', '127.0.0.1']" + if not secure_listeners: + secure_http_bindings = ( + """#- port: %(bind_port)s + # type: http + # tls: true + # resources: + # - names: [client, federation]""" + % locals() ) return ( @@ -556,11 +616,7 @@ class ServerConfig(Config): # will also need to give Synapse a TLS key and certificate: see the TLS section # below.) # - #- port: %(bind_port)s - # type: http - # tls: true - # resources: - # - names: [client, federation] + %(secure_http_bindings)s # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy # that unwraps TLS. @@ -568,13 +624,7 @@ class ServerConfig(Config): # If you plan to use a reverse proxy, please see # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst. 
# - - %(unsecure_http_binding)s - type: http - x_forwarded: true - - resources: - - names: [client, federation] - compress: false + %(unsecure_http_bindings)s # example additional_resources: # diff --git a/synapse/config/tls.py b/synapse/config/tls.py index ca508a224f..c0148aa95c 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -239,12 +239,38 @@ class TlsConfig(Config): self.tls_fingerprints.append({"sha256": sha256_fingerprint}) def generate_config_section( - self, config_dir_path, server_name, data_dir_path, **kwargs + self, + config_dir_path, + server_name, + data_dir_path, + tls_certificate_path, + tls_private_key_path, + acme_domain, + **kwargs ): + """If the acme_domain is specified acme will be enabled. + If the TLS paths are not specified the default will be certs in the + config directory""" + base_key_name = os.path.join(config_dir_path, server_name) - tls_certificate_path = base_key_name + ".tls.crt" - tls_private_key_path = base_key_name + ".tls.key" + if bool(tls_certificate_path) != bool(tls_private_key_path): + raise ConfigError( + "Please specify both a cert path and a key path or neither." + ) + + tls_enabled = ( + "" if tls_certificate_path and tls_private_key_path or acme_domain else "#" + ) + + if not tls_certificate_path: + tls_certificate_path = base_key_name + ".tls.crt" + if not tls_private_key_path: + tls_private_key_path = base_key_name + ".tls.key" + + acme_enabled = bool(acme_domain) + acme_domain = "matrix.example.com" + default_acme_account_file = os.path.join(data_dir_path, "acme_account.key") # this is to avoid the max line length. Sorrynotsorry @@ -269,11 +295,11 @@ class TlsConfig(Config): # instance, if using certbot, use `fullchain.pem` as your certificate, # not `cert.pem`). # - #tls_certificate_path: "%(tls_certificate_path)s" + %(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s" # PEM-encoded private key for TLS # - #tls_private_key_path: "%(tls_private_key_path)s" + %(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s" # Whether to verify TLS server certificates for outbound federation requests. # @@ -340,10 +366,10 @@ class TlsConfig(Config): # permission to listen on port 80. # acme: - # ACME support is disabled by default. Uncomment the following line - # (and tls_certificate_path and tls_private_key_path above) to enable it. + # ACME support is disabled by default. Set this to `true` and uncomment + # tls_certificate_path and tls_private_key_path above to enable it. # - #enabled: true + enabled: %(acme_enabled)s # Endpoint to use to request certificates. If you only want to test, # use Let's Encrypt's staging url: @@ -354,17 +380,17 @@ class TlsConfig(Config): # Port number to listen on for the HTTP-01 challenge. Change this if # you are forwarding connections through Apache/Nginx/etc. # - #port: 80 + port: 80 # Local addresses to listen on for incoming connections. # Again, you may want to change this if you are forwarding connections # through Apache/Nginx/etc. # - #bind_addresses: ['::', '0.0.0.0'] + bind_addresses: ['::', '0.0.0.0'] # How many days remaining on a certificate before it is renewed. # - #reprovision_threshold: 30 + reprovision_threshold: 30 # The domain that the certificate should be for. Normally this # should be the same as your Matrix domain (i.e., 'server_name'), but, @@ -378,7 +404,7 @@ class TlsConfig(Config): # # If not set, defaults to your 'server_name'. # - #domain: matrix.example.com + domain: %(acme_domain)s # file to use for the account key. 
This will be generated if it doesn't # exist. diff --git a/tests/config/test_database.py b/tests/config/test_database.py new file mode 100644 index 0000000000..151d3006ac --- /dev/null +++ b/tests/config/test_database.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml + +from synapse.config.database import DatabaseConfig + +from tests import unittest + + +class DatabaseConfigTestCase(unittest.TestCase): + def test_database_configured_correctly_no_database_conf_param(self): + conf = yaml.safe_load( + DatabaseConfig().generate_config_section("/data_dir_path", None) + ) + + expected_database_conf = { + "name": "sqlite3", + "args": {"database": "/data_dir_path/homeserver.db"}, + } + + self.assertEqual(conf["database"], expected_database_conf) + + def test_database_configured_correctly_database_conf_param(self): + + database_conf = { + "name": "my super fast datastore", + "args": { + "user": "matrix", + "password": "synapse_database_password", + "host": "synapse_database_host", + "database": "matrix", + }, + } + + conf = yaml.safe_load( + DatabaseConfig().generate_config_section("/data_dir_path", database_conf) + ) + + self.assertEqual(conf["database"], database_conf) diff --git a/tests/config/test_server.py b/tests/config/test_server.py index 1ca5ea54ca..a10d017120 100644 --- a/tests/config/test_server.py +++ b/tests/config/test_server.py @@ -13,7 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.config.server import is_threepid_reserved +import yaml + +from synapse.config.server import ServerConfig, is_threepid_reserved from tests import unittest @@ -29,3 +31,100 @@ class ServerConfigTestCase(unittest.TestCase): self.assertTrue(is_threepid_reserved(config, user1)) self.assertFalse(is_threepid_reserved(config, user3)) self.assertFalse(is_threepid_reserved(config, user1_msisdn)) + + def test_unsecure_listener_no_listeners_open_private_ports_false(self): + conf = yaml.safe_load( + ServerConfig().generate_config_section( + "che.org", "/data_dir_path", False, None + ) + ) + + expected_listeners = [ + { + "port": 8008, + "tls": False, + "type": "http", + "x_forwarded": True, + "bind_addresses": ["::1", "127.0.0.1"], + "resources": [{"names": ["client", "federation"], "compress": False}], + } + ] + + self.assertEqual(conf["listeners"], expected_listeners) + + def test_unsecure_listener_no_listeners_open_private_ports_true(self): + conf = yaml.safe_load( + ServerConfig().generate_config_section( + "che.org", "/data_dir_path", True, None + ) + ) + + expected_listeners = [ + { + "port": 8008, + "tls": False, + "type": "http", + "x_forwarded": True, + "resources": [{"names": ["client", "federation"], "compress": False}], + } + ] + + self.assertEqual(conf["listeners"], expected_listeners) + + def test_listeners_set_correctly_open_private_ports_false(self): + listeners = [ + { + "port": 8448, + "resources": [{"names": ["federation"]}], + "tls": True, + "type": "http", + }, + { + "port": 443, + "resources": [{"names": ["client"]}], + "tls": False, + "type": "http", + }, + ] + + conf = yaml.safe_load( + ServerConfig().generate_config_section( + "this.one.listens", "/data_dir_path", True, listeners + ) + ) + + self.assertEqual(conf["listeners"], listeners) + + def test_listeners_set_correctly_open_private_ports_true(self): + listeners = [ + { + "port": 8448, + "resources": [{"names": ["federation"]}], + "tls": True, + "type": "http", + }, + { + "port": 443, + "resources": [{"names": ["client"]}], + "tls": False, + "type": "http", + }, + { + "port": 1243, + "resources": [{"names": ["client"]}], + "tls": False, + "type": "http", + "bind_addresses": ["this_one_is_bound"], + }, + ] + + expected_listeners = listeners.copy() + expected_listeners[1]["bind_addresses"] = ["::1", "127.0.0.1"] + + conf = yaml.safe_load( + ServerConfig().generate_config_section( + "this.one.listens", "/data_dir_path", True, listeners + ) + ) + + self.assertEqual(conf["listeners"], expected_listeners) diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index 4f8a87a3df..8e0c4b9533 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -16,6 +16,8 @@ import os +import yaml + from OpenSSL import SSL from synapse.config.tls import ConfigError, TlsConfig @@ -191,3 +193,45 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0) self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0) self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0) + + def test_acme_disabled_in_generated_config_no_acme_domain_provied(self): + """ + Checks acme is disabled by default. 
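+
+        No acme_domain is passed to generate_config_section below, so the
+        generated configuration should leave the acme block disabled.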
+ """ + conf = TestConfig() + conf.read_config( + yaml.safe_load( + TestConfig().generate_config_section( + "/config_dir_path", + "my_super_secure_server", + "/data_dir_path", + "/tls_cert_path", + "tls_private_key", + None, # This is the acme_domain + ) + ), + "/config_dir_path", + ) + + self.assertFalse(conf.acme_enabled) + + def test_acme_enabled_in_generated_config_domain_provided(self): + """ + Checks acme is enabled if the acme_domain arg is set to some string. + """ + conf = TestConfig() + conf.read_config( + yaml.safe_load( + TestConfig().generate_config_section( + "/config_dir_path", + "my_super_secure_server", + "/data_dir_path", + "/tls_cert_path", + "tls_private_key", + "my_supe_secure_server", # This is the acme_domain + ) + ), + "/config_dir_path", + ) + + self.assertTrue(conf.acme_enabled) -- cgit 1.4.1 From 71fc04069a5770a204c3514e0237d7374df257a8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 28 Aug 2019 14:59:26 +0200 Subject: Use the v2 lookup API for 3PID invites (#5897) Fixes https://github.com/matrix-org/synapse/issues/5861 Adds support for the v2 lookup API as defined in [MSC2134](https://github.com/matrix-org/matrix-doc/pull/2134). Currently this is only used for 3PID invites. Sytest PR: https://github.com/matrix-org/sytest/pull/679 --- changelog.d/5897.feature | 1 + synapse/handlers/identity.py | 13 ++++ synapse/handlers/room_member.py | 128 +++++++++++++++++++++++++++++++++++++--- synapse/util/hash.py | 33 +++++++++++ 4 files changed, 166 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5897.feature create mode 100644 synapse/util/hash.py diff --git a/changelog.d/5897.feature b/changelog.d/5897.feature new file mode 100644 index 0000000000..7b10774c96 --- /dev/null +++ b/changelog.d/5897.feature @@ -0,0 +1 @@ +Switch to the v2 lookup API for 3PID invites. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index d199521b58..97daca5fee 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -282,3 +282,16 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() + + +class LookupAlgorithm: + """ + Supported hashing algorithms when performing a 3PID lookup. + + SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64 + encoding + NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext + """ + + SHA256 = "sha256" + NONE = "none" diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 249a6d9c5d..4605cb9c0b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -29,9 +29,11 @@ from twisted.internet import defer from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError +from synapse.handlers.identity import LookupAlgorithm from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room +from synapse.util.hash import sha256_and_url_safe_base64 from ._base import BaseHandler @@ -523,7 +525,7 @@ class RoomMemberHandler(object): event (SynapseEvent): The membership event. context: The context of the event. is_guest (bool): Whether the sender is a guest. 
- room_hosts ([str]): Homeservers which are likely to already be in + remote_room_hosts (list[str]|None): Homeservers which are likely to already be in the room, and could be danced with in order to join this homeserver for the first time. ratelimit (bool): Whether to rate limit this request. @@ -634,7 +636,7 @@ class RoomMemberHandler(object): servers.remove(room_alias.domain) servers.insert(0, room_alias.domain) - return (RoomID.from_string(room_id), servers) + return RoomID.from_string(room_id), servers @defer.inlineCallbacks def _get_inviter(self, user_id, room_id): @@ -697,6 +699,44 @@ class RoomMemberHandler(object): raise SynapseError( 403, "Looking up third-party identifiers is denied from this server" ) + + # Check what hashing details are supported by this identity server + use_v1 = False + hash_details = None + try: + hash_details = yield self.simple_http_client.get_json( + "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server) + ) + except (HttpResponseException, ValueError) as e: + # Catch HttpResponseExcept for a non-200 response code + # Catch ValueError for non-JSON response body + + # Check if this identity server does not know about v2 lookups + if e.code == 404: + # This is an old identity server that does not yet support v2 lookups + use_v1 = True + else: + logger.warn("Error when looking up hashing details: %s" % (e,)) + return None + + if use_v1: + return (yield self._lookup_3pid_v1(id_server, medium, address)) + + return (yield self._lookup_3pid_v2(id_server, medium, address, hash_details)) + + @defer.inlineCallbacks + def _lookup_3pid_v1(self, id_server, medium, address): + """Looks up a 3pid in the passed identity server using v1 lookup. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + + Returns: + str: the matrix ID of the 3pid, or None if it is not recognized. + """ try: data = yield self.simple_http_client.get_json( "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), @@ -711,8 +751,83 @@ class RoomMemberHandler(object): except IOError as e: logger.warn("Error from identity server lookup: %s" % (e,)) + + return None + + @defer.inlineCallbacks + def _lookup_3pid_v2(self, id_server, medium, address, hash_details): + """Looks up a 3pid in the passed identity server using v2 lookup. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + hash_details (dict[str, str|list]): A dictionary containing hashing information + provided by an identity server. + + Returns: + Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised. 
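+
+        An illustrative hash_details value (the pepper shown is only an
+        example) would be:
+
+            {"algorithms": ["sha256", "none"], "lookup_pepper": "matrixrocks"}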
+ """ + # Extract information from hash_details + supported_lookup_algorithms = hash_details["algorithms"] + lookup_pepper = hash_details["lookup_pepper"] + + # Check if any of the supported lookup algorithms are present + if LookupAlgorithm.SHA256 in supported_lookup_algorithms: + # Perform a hashed lookup + lookup_algorithm = LookupAlgorithm.SHA256 + + # Hash address, medium and the pepper with sha256 + to_hash = "%s %s %s" % (address, medium, lookup_pepper) + lookup_value = sha256_and_url_safe_base64(to_hash) + + elif LookupAlgorithm.NONE in supported_lookup_algorithms: + # Perform a non-hashed lookup + lookup_algorithm = LookupAlgorithm.NONE + + # Combine together plaintext address and medium + lookup_value = "%s %s" % (address, medium) + + else: + logger.warn( + "None of the provided lookup algorithms of %s%s are supported: %s", + id_server_scheme, + id_server, + hash_details["algorithms"], + ) + raise SynapseError( + 400, + "Provided identity server does not support any v2 lookup " + "algorithms that this homeserver supports.", + ) + + try: + lookup_results = yield self.simple_http_client.post_json_get_json( + "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), + { + "addresses": [lookup_value], + "algorithm": lookup_algorithm, + "pepper": lookup_pepper, + }, + ) + except (HttpResponseException, ValueError) as e: + # Catch HttpResponseExcept for a non-200 response code + # Catch ValueError for non-JSON response body + logger.warn("Error when performing a 3pid lookup: %s" % (e,)) + return None + + # Check for a mapping from what we looked up to an MXID + if "mappings" not in lookup_results or not isinstance( + lookup_results["mappings"], dict + ): + logger.debug("No results from 3pid lookup") return None + # Return the MXID if it's available, or None otherwise + mxid = lookup_results["mappings"].get(lookup_value) + return mxid + @defer.inlineCallbacks def _verify_any_signature(self, data, server_hostname): if server_hostname not in data["signatures"]: @@ -962,9 +1077,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) if complexity: - if complexity["v1"] > max_complexity: - return True - return False + return complexity["v1"] > max_complexity return None @defer.inlineCallbacks @@ -980,10 +1093,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): max_complexity = self.hs.config.limit_remote_rooms.complexity complexity = yield self.store.get_room_complexity(room_id) - if complexity["v1"] > max_complexity: - return True - - return False + return complexity["v1"] > max_complexity @defer.inlineCallbacks def _remote_join(self, requester, remote_room_hosts, room_id, user, content): diff --git a/synapse/util/hash.py b/synapse/util/hash.py new file mode 100644 index 0000000000..359168704e --- /dev/null +++ b/synapse/util/hash.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
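+
+# Illustrative use, mirroring the caller in synapse/handlers/room_member.py
+# (the address, medium and pepper values below are only examples):
+#
+#   to_hash = "%s %s %s" % ("foo@example.com", "email", lookup_pepper)
+#   lookup_value = sha256_and_url_safe_base64(to_hash)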
+ +import hashlib + +import unpaddedbase64 + + +def sha256_and_url_safe_base64(input_text): + """SHA256 hash an input string, encode the digest as url-safe base64, and + return + + :param input_text: string to hash + :type input_text: str + + :returns a sha256 hashed and url-safe base64 encoded digest + :rtype: str + """ + digest = hashlib.sha256(input_text.encode()).digest() + return unpaddedbase64.encode_base64(digest, urlsafe=True) -- cgit 1.4.1 From 5798a134c0a98b9b5b15808a3d1b0e3e63fe4030 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 28 Aug 2019 14:25:05 +0100 Subject: Removing entry for 5903 --- changelog.d/5903.feature | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/5903.feature diff --git a/changelog.d/5903.feature b/changelog.d/5903.feature deleted file mode 100644 index fc60d02107..0000000000 --- a/changelog.d/5903.feature +++ /dev/null @@ -1 +0,0 @@ -Add bot user type. -- cgit 1.4.1 From deca277d0972c98a643997d7f6a388b313d2d2fb Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Wed, 28 Aug 2019 15:55:58 +0100 Subject: Let synctl use a config directory. (#5904) * Let synctl use a config directory. --- changelog.d/5904.feature | 1 + synapse/config/__init__.py | 7 ++++--- synctl | 13 ++++++++++--- 3 files changed, 15 insertions(+), 6 deletions(-) create mode 100644 changelog.d/5904.feature diff --git a/changelog.d/5904.feature b/changelog.d/5904.feature new file mode 100644 index 0000000000..43b5304f39 --- /dev/null +++ b/changelog.d/5904.feature @@ -0,0 +1 @@ +Let synctl accept a directory of config files. diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py index f2a5a41e92..1e76e9559d 100644 --- a/synapse/config/__init__.py +++ b/synapse/config/__init__.py @@ -13,8 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import ConfigError +from ._base import ConfigError, find_config_files -# export ConfigError if somebody does import * +# export ConfigError and find_config_files if somebody does +# import * # this is largely a fudge to stop PEP8 moaning about the import -__all__ = ["ConfigError"] +__all__ = ["ConfigError", "find_config_files"] diff --git a/synctl b/synctl index 794de99ea3..a9629cf0e8 100755 --- a/synctl +++ b/synctl @@ -30,6 +30,8 @@ from six import iteritems import yaml +from synapse.config import find_config_files + SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] GREEN = "\x1b[1;32m" @@ -135,7 +137,8 @@ def main(): "configfile", nargs="?", default="homeserver.yaml", - help="the homeserver config file, defaults to homeserver.yaml", + help="the homeserver config file. Defaults to homeserver.yaml. 
May also be" + " a directory with *.yaml files", ) parser.add_argument( "-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker" @@ -176,8 +179,12 @@ def main(): ) sys.exit(1) - with open(configfile) as stream: - config = yaml.safe_load(stream) + config_files = find_config_files([configfile]) + config = {} + for config_file in config_files: + with open(config_file) as file_stream: + yaml_config = yaml.safe_load(file_stream) + config.update(yaml_config) pidfile = config["pid_file"] cache_factor = config.get("synctl_cache_factor") -- cgit 1.4.1 From 92c1550f4abe1aa8495b0e1fc6dc38d338a4ecd1 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Wed, 28 Aug 2019 19:08:32 +0100 Subject: Add a link to python's logging config schema (#5926) --- changelog.d/5926.misc | 1 + docs/sample_config.yaml | 3 ++- synapse/config/logger.py | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5926.misc diff --git a/changelog.d/5926.misc b/changelog.d/5926.misc new file mode 100644 index 0000000000..4383c302ec --- /dev/null +++ b/changelog.d/5926.misc @@ -0,0 +1 @@ +Add link in sample config to the logging config schema. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6da1167632..43969bbb70 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -485,7 +485,8 @@ database: ## Logging ## -# A yaml python logging config file +# A yaml python logging config file as described by +# https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema # log_config: "CONFDIR/SERVERNAME.log.config" diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 981df5a10c..2704c18720 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -89,7 +89,8 @@ class LoggingConfig(Config): """\ ## Logging ## - # A yaml python logging config file + # A yaml python logging config file as described by + # https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema # log_config: "%(log_config)s" """ -- cgit 1.4.1 From e7011280c7796e437ba34ed97033b120b8556043 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 29 Aug 2019 22:19:57 +1000 Subject: Fix coverage in sytest and use plugins for buildkite (#5922) --- .buildkite/docker-compose.py35.pg95.yaml | 4 +- .buildkite/docker-compose.py37.pg11.yaml | 4 +- .buildkite/docker-compose.py37.pg95.yaml | 4 +- .buildkite/merge_base_branch.sh | 2 +- .buildkite/pipeline.yml | 84 +++++++++++++++++++++++++++----- .coveragerc | 3 +- changelog.d/5922.misc | 1 + tox.ini | 22 ++++----- 8 files changed, 92 insertions(+), 32 deletions(-) create mode 100644 changelog.d/5922.misc diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml index aaea33006b..43237b7775 100644 --- a/.buildkite/docker-compose.py35.pg95.yaml +++ b/.buildkite/docker-compose.py35.pg95.yaml @@ -17,6 +17,6 @@ services: SYNAPSE_POSTGRES_HOST: postgres SYNAPSE_POSTGRES_USER: postgres SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app + working_dir: /src volumes: - - ..:/app + - ..:/src diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml index 1b32675e78..b767228147 100644 --- a/.buildkite/docker-compose.py37.pg11.yaml +++ b/.buildkite/docker-compose.py37.pg11.yaml @@ -17,6 +17,6 @@ services: SYNAPSE_POSTGRES_HOST: postgres SYNAPSE_POSTGRES_USER: postgres SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app + working_dir: /src volumes: - - ..:/app + - ..:/src diff --git 
a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml index 7679f6508d..02fcd28304 100644 --- a/.buildkite/docker-compose.py37.pg95.yaml +++ b/.buildkite/docker-compose.py37.pg95.yaml @@ -17,6 +17,6 @@ services: SYNAPSE_POSTGRES_HOST: postgres SYNAPSE_POSTGRES_USER: postgres SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app + working_dir: /src volumes: - - ..:/app + - ..:/src diff --git a/.buildkite/merge_base_branch.sh b/.buildkite/merge_base_branch.sh index 26176d6465..eb7219a56d 100755 --- a/.buildkite/merge_base_branch.sh +++ b/.buildkite/merge_base_branch.sh @@ -27,7 +27,7 @@ git config --global user.name "A robot" # Fetch and merge. If it doesn't work, it will raise due to set -e. git fetch -u origin $GITBASE -git merge --no-edit origin/$GITBASE +git merge --no-edit --no-commit origin/$GITBASE # Show what we are after. git --no-pager show -s diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index d9327227ed..d901849735 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,8 +1,7 @@ env: - CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f" + COVERALLS_REPO_TOKEN: wsJWOby6j0uCYFiCes3r0XauxO27mx8lD steps: - - command: - "python -m pip install tox" - "tox -e check_codestyle" @@ -10,6 +9,7 @@ steps: plugins: - docker#v3.0.1: image: "python:3.6" + mount-buildkite-agent: false - command: - "python -m pip install tox" @@ -18,6 +18,7 @@ steps: plugins: - docker#v3.0.1: image: "python:3.6" + mount-buildkite-agent: false - command: - "python -m pip install tox" @@ -26,6 +27,7 @@ steps: plugins: - docker#v3.0.1: image: "python:3.6" + mount-buildkite-agent: false - command: - "python -m pip install tox" @@ -36,6 +38,7 @@ steps: - docker#v3.0.1: image: "python:3.6" propagate-environment: true + mount-buildkite-agent: false - command: - "python -m pip install tox" @@ -44,6 +47,7 @@ steps: plugins: - docker#v3.0.1: image: "python:3.6" + mount-buildkite-agent: false - command: - "python -m pip install tox" @@ -52,21 +56,26 @@ steps: plugins: - docker#v3.0.1: image: "python:3.5" + mount-buildkite-agent: false - wait - command: - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev" - "python3.5 -m pip install tox" - - "tox -e py35-old,codecov" + - "tox -e py35-old,combine" label: ":python: 3.5 / SQLite / Old Deps" env: TRIAL_FLAGS: "-j 2" LANG: "C.UTF-8" plugins: - docker#v3.0.1: - image: "ubuntu:xenial" # We use xenail to get an old sqlite and python + image: "ubuntu:xenial" # We use xenial to get an old sqlite and python + workdir: "/src" + mount-buildkite-agent: false propagate-environment: true + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -76,14 +85,18 @@ steps: - command: - "python -m pip install tox" - - "tox -e py35,codecov" + - "tox -e py35,combine" label: ":python: 3.5 / SQLite" env: TRIAL_FLAGS: "-j 2" plugins: - docker#v3.0.1: image: "python:3.5" + workdir: "/src" + mount-buildkite-agent: false propagate-environment: true + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -93,14 +106,18 @@ steps: - command: - "python -m pip install tox" - - "tox -e py36,codecov" + - "tox -e py36,combine" label: ":python: 3.6 / SQLite" env: TRIAL_FLAGS: "-j 2" plugins: - docker#v3.0.1: image: "python:3.6" + workdir: "/src" + mount-buildkite-agent: false propagate-environment: true + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -110,14 +127,18 @@ steps: - 
command: - "python -m pip install tox" - - "tox -e py37,codecov" + - "tox -e py37,combine" label: ":python: 3.7 / SQLite" env: TRIAL_FLAGS: "-j 2" plugins: - docker#v3.0.1: image: "python:3.7" + workdir: "/src" + mount-buildkite-agent: false propagate-environment: true + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -131,12 +152,14 @@ steps: env: TRIAL_FLAGS: "-j 8" command: - - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'" + - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,combine'" plugins: - docker-compose#v2.1.0: run: testenv config: - .buildkite/docker-compose.py35.pg95.yaml + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -150,12 +173,14 @@ steps: env: TRIAL_FLAGS: "-j 8" command: - - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'" + - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'" plugins: - docker-compose#v2.1.0: run: testenv config: - .buildkite/docker-compose.py37.pg95.yaml + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -169,12 +194,14 @@ steps: env: TRIAL_FLAGS: "-j 8" command: - - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'" + - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'" plugins: - docker-compose#v2.1.0: run: testenv config: - .buildkite/docker-compose.py37.pg11.yaml + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -182,7 +209,6 @@ steps: - exit_status: 2 limit: 2 - - label: "SyTest - :python: 3.5 / SQLite / Monolith" agents: queue: "medium" @@ -195,6 +221,16 @@ steps: propagate-environment: true always-pull: true workdir: "/src" + entrypoint: ["/bin/sh", "-e", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] + - matrix-org/annotate: + path: "logs/annotate.md" + class: "error" + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -216,6 +252,16 @@ steps: propagate-environment: true always-pull: true workdir: "/src" + entrypoint: ["/bin/sh", "-e", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] + - matrix-org/annotate: + path: "logs/annotate.md" + class: "error" + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 @@ -240,9 +286,25 @@ steps: propagate-environment: true always-pull: true workdir: "/src" + entrypoint: ["/bin/sh", "-e", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] + - matrix-org/annotate: + path: "logs/annotate.md" + class: "error" + - matrix-org/coveralls#v1.0: + parallel: "true" retry: automatic: - exit_status: -1 limit: 2 - exit_status: 2 limit: 2 + + - wait: ~ + continue_on_failure: true + + - label: Trigger webhook + command: "curl -k https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN -d \"payload[build_num]=$BUILDKITE_BUILD_NUMBER&payload[status]=done\"" diff --git a/.coveragerc b/.coveragerc index e9460a340a..11f2ec8387 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,7 +1,8 @@ [run] branch = True parallel = True -include = synapse/* +include=$TOP/synapse/* +data_file = $TOP/.coverage [report] precision = 2 
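With `parallel = True`, each test process writes its own `.coverage.*` data
file, now rooted at `$TOP` (which tox sets to the source checkout); the
`combine` tox environment merges those files and prints a single report, and
each Buildkite job uploads its share to Coveralls as part of a parallel build
that the final webhook step closes off. A minimal sketch of the merge step
using coverage.py's Python API (illustrative only; the tox environment shells
out to the `coverage` CLI instead):

    import coverage

    # Gather the per-process .coverage.* data files next to data_file and
    # merge them into one data set, then print the textual summary. This is
    # the API equivalent of `coverage combine` followed by `coverage report`.
    cov = coverage.Coverage(data_file=".coverage")
    cov.combine()
    cov.report()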
diff --git a/changelog.d/5922.misc b/changelog.d/5922.misc new file mode 100644 index 0000000000..2cc864897e --- /dev/null +++ b/changelog.d/5922.misc @@ -0,0 +1 @@ +Update Buildkite pipeline to use plugins instead of buildkite-agent commands. diff --git a/tox.ini b/tox.ini index f9a3b7e49a..7cb40847b5 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ deps = python-subunit junitxml coverage + coverage-enable-subprocess parameterized # cyptography 2.2 requires setuptools >= 18.5 @@ -43,13 +44,13 @@ whitelist_externals = setenv = {[base]setenv} postgres: SYNAPSE_POSTGRES = 1 + TOP={toxinidir} passenv = * commands = /usr/bin/find "{toxinidir}" -name '*.pyc' -delete # Add this so that coverage will run on subprocesses - sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py' {envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} # As of twisted 16.4, trial tries to import the tests as a package (previously @@ -75,8 +76,6 @@ commands = # ) usedevelop=true - - # A test suite for the oldest supported versions of Python libraries, to catch # any uses of APIs not available in them. [testenv:py35-old] @@ -88,6 +87,7 @@ deps = mock lxml coverage + coverage-enable-subprocess commands = /usr/bin/find "{toxinidir}" -name '*.pyc' -delete @@ -96,15 +96,11 @@ commands = # OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis). /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs -d"\n" pip install' - # Add this so that coverage will run on subprocesses - /bin/sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py' - # Install Synapse itself. This won't update any libraries. pip install -e . 
{envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} - [testenv:packaging] skip_install=True deps = @@ -137,15 +133,15 @@ basepython = python3.6 [testenv:check-sampleconfig] commands = {toxinidir}/scripts-dev/generate_sample_config --check -[testenv:codecov] +[testenv:combine] skip_install = True deps = coverage - codecov -commands = +whitelist_externals = + bash +commands= coverage combine - coverage xml - codecov -X gcov + coverage report [testenv:mypy] basepython = python3.5 @@ -155,4 +151,4 @@ deps = extras = all commands = mypy --ignore-missing-imports \ synapse/logging/_structured.py \ - synapse/logging/_terse_json.py \ No newline at end of file + synapse/logging/_terse_json.py -- cgit 1.4.1 From 5625abe503569f3ffddcc4a2c0d6aa5aad3f188d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 30 Aug 2019 15:06:40 +1000 Subject: Fix buildkite pipeline plugin matrix-org/annotate using the wrong variable config --- .buildkite/pipeline.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index d901849735..368fb49be4 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -228,7 +228,7 @@ steps: upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] - matrix-org/annotate: path: "logs/annotate.md" - class: "error" + style: "error" - matrix-org/coveralls#v1.0: parallel: "true" retry: @@ -259,7 +259,7 @@ steps: upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] - matrix-org/annotate: path: "logs/annotate.md" - class: "error" + style: "error" - matrix-org/coveralls#v1.0: parallel: "true" retry: @@ -293,7 +293,7 @@ steps: upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] - matrix-org/annotate: path: "logs/annotate.md" - class: "error" + style: "error" - matrix-org/coveralls#v1.0: parallel: "true" retry: -- cgit 1.4.1 From 3057095a5dab5a6f1c023f25386a6f1e4346bd9f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 30 Aug 2019 12:00:20 +0100 Subject: Revert "Use the v2 lookup API for 3PID invites (#5897)" (#5937) This reverts commit 71fc04069a5770a204c3514e0237d7374df257a8. This broke 3PID invites as #5892 was required for it to work correctly. --- changelog.d/5897.feature | 1 - synapse/handlers/identity.py | 13 ---- synapse/handlers/room_member.py | 128 +++------------------------------------- synapse/util/hash.py | 33 ----------- 4 files changed, 9 insertions(+), 166 deletions(-) delete mode 100644 changelog.d/5897.feature delete mode 100644 synapse/util/hash.py diff --git a/changelog.d/5897.feature b/changelog.d/5897.feature deleted file mode 100644 index 7b10774c96..0000000000 --- a/changelog.d/5897.feature +++ /dev/null @@ -1 +0,0 @@ -Switch to the v2 lookup API for 3PID invites. \ No newline at end of file diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 97daca5fee..d199521b58 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -282,16 +282,3 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() - - -class LookupAlgorithm: - """ - Supported hashing algorithms when performing a 3PID lookup. - - SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64 - encoding - NONE - Not performing any hashing. 
Simply sending an (address, medium) combo in plaintext - """ - - SHA256 = "sha256" - NONE = "none" diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 4605cb9c0b..249a6d9c5d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -29,11 +29,9 @@ from twisted.internet import defer from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError -from synapse.handlers.identity import LookupAlgorithm from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room -from synapse.util.hash import sha256_and_url_safe_base64 from ._base import BaseHandler @@ -525,7 +523,7 @@ class RoomMemberHandler(object): event (SynapseEvent): The membership event. context: The context of the event. is_guest (bool): Whether the sender is a guest. - remote_room_hosts (list[str]|None): Homeservers which are likely to already be in + room_hosts ([str]): Homeservers which are likely to already be in the room, and could be danced with in order to join this homeserver for the first time. ratelimit (bool): Whether to rate limit this request. @@ -636,7 +634,7 @@ class RoomMemberHandler(object): servers.remove(room_alias.domain) servers.insert(0, room_alias.domain) - return RoomID.from_string(room_id), servers + return (RoomID.from_string(room_id), servers) @defer.inlineCallbacks def _get_inviter(self, user_id, room_id): @@ -699,44 +697,6 @@ class RoomMemberHandler(object): raise SynapseError( 403, "Looking up third-party identifiers is denied from this server" ) - - # Check what hashing details are supported by this identity server - use_v1 = False - hash_details = None - try: - hash_details = yield self.simple_http_client.get_json( - "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server) - ) - except (HttpResponseException, ValueError) as e: - # Catch HttpResponseExcept for a non-200 response code - # Catch ValueError for non-JSON response body - - # Check if this identity server does not know about v2 lookups - if e.code == 404: - # This is an old identity server that does not yet support v2 lookups - use_v1 = True - else: - logger.warn("Error when looking up hashing details: %s" % (e,)) - return None - - if use_v1: - return (yield self._lookup_3pid_v1(id_server, medium, address)) - - return (yield self._lookup_3pid_v2(id_server, medium, address, hash_details)) - - @defer.inlineCallbacks - def _lookup_3pid_v1(self, id_server, medium, address): - """Looks up a 3pid in the passed identity server using v1 lookup. - - Args: - id_server (str): The server name (including port, if required) - of the identity server to use. - medium (str): The type of the third party identifier (e.g. "email"). - address (str): The third party identifier (e.g. "foo@example.com"). - - Returns: - str: the matrix ID of the 3pid, or None if it is not recognized. - """ try: data = yield self.simple_http_client.get_json( "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), @@ -751,83 +711,8 @@ class RoomMemberHandler(object): except IOError as e: logger.warn("Error from identity server lookup: %s" % (e,)) - - return None - - @defer.inlineCallbacks - def _lookup_3pid_v2(self, id_server, medium, address, hash_details): - """Looks up a 3pid in the passed identity server using v2 lookup. 
- - Args: - id_server (str): The server name (including port, if required) - of the identity server to use. - medium (str): The type of the third party identifier (e.g. "email"). - address (str): The third party identifier (e.g. "foo@example.com"). - hash_details (dict[str, str|list]): A dictionary containing hashing information - provided by an identity server. - - Returns: - Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised. - """ - # Extract information from hash_details - supported_lookup_algorithms = hash_details["algorithms"] - lookup_pepper = hash_details["lookup_pepper"] - - # Check if any of the supported lookup algorithms are present - if LookupAlgorithm.SHA256 in supported_lookup_algorithms: - # Perform a hashed lookup - lookup_algorithm = LookupAlgorithm.SHA256 - - # Hash address, medium and the pepper with sha256 - to_hash = "%s %s %s" % (address, medium, lookup_pepper) - lookup_value = sha256_and_url_safe_base64(to_hash) - - elif LookupAlgorithm.NONE in supported_lookup_algorithms: - # Perform a non-hashed lookup - lookup_algorithm = LookupAlgorithm.NONE - - # Combine together plaintext address and medium - lookup_value = "%s %s" % (address, medium) - - else: - logger.warn( - "None of the provided lookup algorithms of %s%s are supported: %s", - id_server_scheme, - id_server, - hash_details["algorithms"], - ) - raise SynapseError( - 400, - "Provided identity server does not support any v2 lookup " - "algorithms that this homeserver supports.", - ) - - try: - lookup_results = yield self.simple_http_client.post_json_get_json( - "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), - { - "addresses": [lookup_value], - "algorithm": lookup_algorithm, - "pepper": lookup_pepper, - }, - ) - except (HttpResponseException, ValueError) as e: - # Catch HttpResponseExcept for a non-200 response code - # Catch ValueError for non-JSON response body - logger.warn("Error when performing a 3pid lookup: %s" % (e,)) - return None - - # Check for a mapping from what we looked up to an MXID - if "mappings" not in lookup_results or not isinstance( - lookup_results["mappings"], dict - ): - logger.debug("No results from 3pid lookup") return None - # Return the MXID if it's available, or None otherwise - mxid = lookup_results["mappings"].get(lookup_value) - return mxid - @defer.inlineCallbacks def _verify_any_signature(self, data, server_hostname): if server_hostname not in data["signatures"]: @@ -1077,7 +962,9 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) if complexity: - return complexity["v1"] > max_complexity + if complexity["v1"] > max_complexity: + return True + return False return None @defer.inlineCallbacks @@ -1093,7 +980,10 @@ class RoomMemberMasterHandler(RoomMemberHandler): max_complexity = self.hs.config.limit_remote_rooms.complexity complexity = yield self.store.get_room_complexity(room_id) - return complexity["v1"] > max_complexity + if complexity["v1"] > max_complexity: + return True + + return False @defer.inlineCallbacks def _remote_join(self, requester, remote_room_hosts, room_id, user, content): diff --git a/synapse/util/hash.py b/synapse/util/hash.py deleted file mode 100644 index 359168704e..0000000000 --- a/synapse/util/hash.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import hashlib
-
-import unpaddedbase64
-
-
-def sha256_and_url_safe_base64(input_text):
-    """SHA256 hash an input string, encode the digest as url-safe base64, and
-    return
-
-    :param input_text: string to hash
-    :type input_text: str
-
-    :returns a sha256 hashed and url-safe base64 encoded digest
-    :rtype: str
-    """
-    digest = hashlib.sha256(input_text.encode()).digest()
-    return unpaddedbase64.encode_base64(digest, urlsafe=True)
-- cgit 1.4.1


From d19505a8c1cfb91f653d14965f611a0d4d5e617f Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Fri, 30 Aug 2019 23:13:16 +1000
Subject: Removed unused jenkins/ folder and script (#5938)

---
 changelog.d/5938.misc      |  1 +
 jenkins/prepare_synapse.sh | 16 ----------------
 2 files changed, 1 insertion(+), 16 deletions(-)
 create mode 100644 changelog.d/5938.misc
 delete mode 100755 jenkins/prepare_synapse.sh

diff --git a/changelog.d/5938.misc b/changelog.d/5938.misc
new file mode 100644
index 0000000000..b5a3b6ee3b
--- /dev/null
+++ b/changelog.d/5938.misc
@@ -0,0 +1 @@
+Remove unused jenkins/prepare_synapse.sh file.
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
deleted file mode 100755
index 016afb8baa..0000000000
--- a/jenkins/prepare_synapse.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#! /bin/bash
-
-set -eux
-
-cd "`dirname $0`/.."
-
-TOX_DIR=$WORKSPACE/.tox
-
-mkdir -p $TOX_DIR
-
-if ! [ $TOX_DIR -ef .tox ]; then
-    ln -s "$TOX_DIR" .tox
-fi
-
-# set up the virtualenv
-tox -e py27 --notest -v
-- cgit 1.4.1


From 4765f0cfd95f6160f32c75481651d125f343cd58 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 30 Aug 2019 15:22:51 +0100
Subject: Add m.id_access_token flag (#5930)

Adds a flag to `/versions`' `unstable_features` section indicating that
this Synapse understands what an `id_access_token` is, as per
https://github.com/matrix-org/synapse/issues/5927#issuecomment-523566043

Fixes #5927

---
 changelog.d/5930.misc           | 1 +
 synapse/rest/client/versions.py | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5930.misc

diff --git a/changelog.d/5930.misc b/changelog.d/5930.misc
new file mode 100644
index 0000000000..81dcc10e6d
--- /dev/null
+++ b/changelog.d/5930.misc
@@ -0,0 +1 @@
+Add temporary flag to /versions in unstable_features to indicate this Synapse supports receiving id_access_token parameters on calls to identity server-proxying endpoints.
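For illustration, a minimal client-side sketch of how the new advertisement can be probed. This is not part of the patch, and matrix.example.com is a placeholder homeserver:

    import json
    from urllib.request import urlopen

    # Placeholder homeserver; any Synapse carrying this commit should
    # advertise the flag until it is removed in r0.6.0.
    BASE_URL = "https://matrix.example.com"

    # GET /_matrix/client/versions is the standard client-server endpoint
    # whose response is built in synapse/rest/client/versions.py below.
    with urlopen(BASE_URL + "/_matrix/client/versions") as resp:
        versions = json.load(resp)

    # unstable_features maps feature names to booleans.
    if versions.get("unstable_features", {}).get("m.id_access_token", False):
        print("homeserver accepts id_access_token parameters")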
\ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 0e09191632..c51c9e617d 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -44,7 +44,12 @@ class VersionsRestServlet(RestServlet): "r0.5.0", ], # as per MSC1497: - "unstable_features": {"m.lazy_load_members": True}, + "unstable_features": { + "m.lazy_load_members": True, + # as per https://github.com/matrix-org/synapse/issues/5927 + # to be removed in r0.6.0 + "m.id_access_token": True, + }, }, ) -- cgit 1.4.1 From 4fca313389f2acbb32b3833babc8f3ad721e3047 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 31 Aug 2019 01:01:57 +1000 Subject: Move buildkite config to the pipelines repo (#5943) --- .buildkite/format_tap.py | 15 +++ .buildkite/pipeline.yml | 310 ----------------------------------------------- changelog.d/5943.misc | 1 + 3 files changed, 16 insertions(+), 310 deletions(-) delete mode 100644 .buildkite/pipeline.yml create mode 100644 changelog.d/5943.misc diff --git a/.buildkite/format_tap.py b/.buildkite/format_tap.py index 94582f5571..b557a9c38e 100644 --- a/.buildkite/format_tap.py +++ b/.buildkite/format_tap.py @@ -1,3 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import sys from tap.parser import Parser from tap.line import Result, Unknown, Diagnostic diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml deleted file mode 100644 index 368fb49be4..0000000000 --- a/.buildkite/pipeline.yml +++ /dev/null @@ -1,310 +0,0 @@ -env: - COVERALLS_REPO_TOKEN: wsJWOby6j0uCYFiCes3r0XauxO27mx8lD - -steps: - - command: - - "python -m pip install tox" - - "tox -e check_codestyle" - label: "\U0001F9F9 Check Style" - plugins: - - docker#v3.0.1: - image: "python:3.6" - mount-buildkite-agent: false - - - command: - - "python -m pip install tox" - - "tox -e packaging" - label: "\U0001F9F9 packaging" - plugins: - - docker#v3.0.1: - image: "python:3.6" - mount-buildkite-agent: false - - - command: - - "python -m pip install tox" - - "tox -e check_isort" - label: "\U0001F9F9 isort" - plugins: - - docker#v3.0.1: - image: "python:3.6" - mount-buildkite-agent: false - - - command: - - "python -m pip install tox" - - "scripts-dev/check-newsfragment" - label: ":newspaper: Newsfile" - branches: "!master !develop !release-*" - plugins: - - docker#v3.0.1: - image: "python:3.6" - propagate-environment: true - mount-buildkite-agent: false - - - command: - - "python -m pip install tox" - - "tox -e check-sampleconfig" - label: "\U0001F9F9 check-sample-config" - plugins: - - docker#v3.0.1: - image: "python:3.6" - mount-buildkite-agent: false - - - command: - - "python -m pip install tox" - - "tox -e mypy" - label: ":mypy: mypy" - plugins: - - docker#v3.0.1: - image: "python:3.5" - mount-buildkite-agent: false - - - wait - - - command: - - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev" - - "python3.5 -m pip install tox" - - "tox -e py35-old,combine" - label: ":python: 3.5 / SQLite / Old Deps" - env: - TRIAL_FLAGS: "-j 2" - LANG: "C.UTF-8" - plugins: - - docker#v3.0.1: - image: "ubuntu:xenial" # We use xenial to get an old sqlite and python - workdir: "/src" - mount-buildkite-agent: false - propagate-environment: true - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - command: - - "python -m pip install tox" - - "tox -e py35,combine" - label: ":python: 3.5 / SQLite" - env: - TRIAL_FLAGS: "-j 2" - plugins: - - docker#v3.0.1: - image: "python:3.5" - workdir: "/src" - mount-buildkite-agent: false - propagate-environment: true - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - command: - - "python -m pip install tox" - - "tox -e py36,combine" - label: ":python: 3.6 / SQLite" - env: - TRIAL_FLAGS: "-j 2" - plugins: - - docker#v3.0.1: - image: "python:3.6" - workdir: "/src" - mount-buildkite-agent: false - propagate-environment: true - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - command: - - "python -m pip install tox" - - "tox -e py37,combine" - label: ":python: 3.7 / SQLite" - env: - TRIAL_FLAGS: "-j 2" - plugins: - - docker#v3.0.1: - image: "python:3.7" - workdir: "/src" - mount-buildkite-agent: false - propagate-environment: true - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 3.5 / :postgres: 9.5" - agents: - queue: "medium" - env: - TRIAL_FLAGS: "-j 8" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,combine'" 
- plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py35.pg95.yaml - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 3.7 / :postgres: 9.5" - agents: - queue: "medium" - env: - TRIAL_FLAGS: "-j 8" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'" - plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py37.pg95.yaml - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 3.7 / :postgres: 11" - agents: - queue: "medium" - env: - TRIAL_FLAGS: "-j 8" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'" - plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py37.pg11.yaml - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: "SyTest - :python: 3.5 / SQLite / Monolith" - agents: - queue: "medium" - command: - - "bash .buildkite/merge_base_branch.sh" - - "bash /synapse_sytest.sh" - plugins: - - docker#v3.0.1: - image: "matrixdotorg/sytest-synapse:py35" - propagate-environment: true - always-pull: true - workdir: "/src" - entrypoint: ["/bin/sh", "-e", "-c"] - mount-buildkite-agent: false - volumes: ["./logs:/logs"] - - artifacts#v1.2.0: - upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] - - matrix-org/annotate: - path: "logs/annotate.md" - style: "error" - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith" - agents: - queue: "medium" - env: - POSTGRES: "1" - command: - - "bash .buildkite/merge_base_branch.sh" - - "bash /synapse_sytest.sh" - plugins: - - docker#v3.0.1: - image: "matrixdotorg/sytest-synapse:py35" - propagate-environment: true - always-pull: true - workdir: "/src" - entrypoint: ["/bin/sh", "-e", "-c"] - mount-buildkite-agent: false - volumes: ["./logs:/logs"] - - artifacts#v1.2.0: - upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] - - matrix-org/annotate: - path: "logs/annotate.md" - style: "error" - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers" - agents: - queue: "medium" - env: - POSTGRES: "1" - WORKERS: "1" - BLACKLIST: "synapse-blacklist-with-workers" - command: - - "bash .buildkite/merge_base_branch.sh" - - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'" - - "bash /synapse_sytest.sh" - plugins: - - docker#v3.0.1: - image: "matrixdotorg/sytest-synapse:py35" - propagate-environment: true - always-pull: true - workdir: "/src" - entrypoint: ["/bin/sh", "-e", "-c"] - mount-buildkite-agent: false - volumes: ["./logs:/logs"] - - artifacts#v1.2.0: - upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ] - - matrix-org/annotate: - path: "logs/annotate.md" - style: "error" - - matrix-org/coveralls#v1.0: - parallel: "true" - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - wait: ~ - continue_on_failure: true - - - label: Trigger webhook - command: "curl -k 
https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN -d \"payload[build_num]=$BUILDKITE_BUILD_NUMBER&payload[status]=done\"" diff --git a/changelog.d/5943.misc b/changelog.d/5943.misc new file mode 100644 index 0000000000..6545e1244a --- /dev/null +++ b/changelog.d/5943.misc @@ -0,0 +1 @@ +Move Buildkite pipeline config to the pipelines repo. -- cgit 1.4.1 From 4548d1f87e3ff3dc24b0af8f944276137d3228e3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 30 Aug 2019 16:28:26 +0100 Subject: Remove unnecessary parentheses around return statements (#5931) Python will return a tuple whether there are parentheses around the returned values or not. I'm just sick of my editor complaining about this all over the place :) --- changelog.d/5931.misc | 1 + synapse/api/auth.py | 14 ++--- synapse/app/frontend_proxy.py | 8 +-- synapse/crypto/event_signing.py | 4 +- synapse/federation/federation_client.py | 8 +-- synapse/federation/federation_server.py | 22 ++++---- synapse/handlers/account_data.py | 4 +- synapse/handlers/auth.py | 8 +-- synapse/handlers/federation.py | 2 +- synapse/handlers/initial_sync.py | 4 +- synapse/handlers/presence.py | 4 +- synapse/handlers/receipts.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room_member.py | 2 +- synapse/handlers/sync.py | 8 +-- synapse/handlers/typing.py | 2 +- synapse/http/federation/well_known_resolver.py | 2 +- synapse/module_api/__init__.py | 2 +- synapse/notifier.py | 6 +- synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/replication/http/federation.py | 8 +-- synapse/replication/http/login.py | 2 +- synapse/replication/http/membership.py | 6 +- synapse/replication/http/register.py | 4 +- synapse/replication/http/send_event.py | 2 +- synapse/replication/tcp/streams/_base.py | 8 +-- synapse/rest/admin/__init__.py | 26 ++++----- synapse/rest/admin/media.py | 6 +- synapse/rest/admin/purge_room_servlet.py | 2 +- synapse/rest/admin/server_notice_servlet.py | 2 +- synapse/rest/admin/users.py | 4 +- synapse/rest/client/v1/directory.py | 16 +++--- synapse/rest/client/v1/events.py | 8 +-- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 6 +- synapse/rest/client/v1/logout.py | 8 +-- synapse/rest/client/v1/presence.py | 6 +- synapse/rest/client/v1/profile.py | 18 +++--- synapse/rest/client/v1/push_rule.py | 10 ++-- synapse/rest/client/v1/pusher.py | 6 +- synapse/rest/client/v1/room.py | 48 ++++++++-------- synapse/rest/client/v1/voip.py | 4 +- synapse/rest/client/v2_alpha/account.py | 24 ++++---- synapse/rest/client/v2_alpha/account_data.py | 8 +-- synapse/rest/client/v2_alpha/capabilities.py | 2 +- synapse/rest/client/v2_alpha/devices.py | 10 ++-- synapse/rest/client/v2_alpha/filter.py | 4 +- synapse/rest/client/v2_alpha/groups.py | 64 +++++++++++----------- synapse/rest/client/v2_alpha/keys.py | 8 +-- synapse/rest/client/v2_alpha/notifications.py | 2 +- synapse/rest/client/v2_alpha/read_marker.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/register.py | 10 ++-- synapse/rest/client/v2_alpha/relations.py | 8 +-- synapse/rest/client/v2_alpha/report_event.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 14 ++--- .../client/v2_alpha/room_upgrade_rest_servlet.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 ++-- synapse/rest/client/v2_alpha/user_directory.py | 4 +- synapse/rest/media/v1/media_repository.py | 4 +- 
synapse/rest/media/v1/thumbnailer.py | 4 +- .../resource_limits_server_notices.py | 2 +- synapse/storage/account_data.py | 8 +-- synapse/storage/appservice.py | 2 +- synapse/storage/deviceinbox.py | 4 +- synapse/storage/devices.py | 10 ++-- synapse/storage/events.py | 10 ++-- synapse/storage/presence.py | 2 +- synapse/storage/pusher.py | 2 +- synapse/storage/receipts.py | 2 +- synapse/storage/stream.py | 12 ++-- synapse/storage/util/id_generators.py | 4 +- synapse/streams/config.py | 2 +- tests/handlers/test_register.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 2 +- tests/server.py | 2 +- tests/test_server.py | 2 +- tests/test_state.py | 2 +- tests/utils.py | 2 +- 81 files changed, 287 insertions(+), 286 deletions(-) create mode 100644 changelog.d/5931.misc diff --git a/changelog.d/5931.misc b/changelog.d/5931.misc new file mode 100644 index 0000000000..ac8e74f5b9 --- /dev/null +++ b/changelog.d/5931.misc @@ -0,0 +1 @@ +Remove unnecessary parentheses in return statements. \ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 7b3a5a8221..fd3cdf50b0 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -276,25 +276,25 @@ class Auth(object): self.get_access_token_from_request(request) ) if app_service is None: - return (None, None) + return None, None if app_service.ip_range_whitelist: ip_address = IPAddress(self.hs.get_ip_from_request(request)) if ip_address not in app_service.ip_range_whitelist: - return (None, None) + return None, None if b"user_id" not in request.args: - return (app_service.sender, app_service) + return app_service.sender, app_service user_id = request.args[b"user_id"][0].decode("utf8") if app_service.sender == user_id: - return (app_service.sender, app_service) + return app_service.sender, app_service if not app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") if not (yield self.store.get_user_by_id(user_id)): raise AuthError(403, "Application service has not registered this user") - return (user_id, app_service) + return user_id, app_service @defer.inlineCallbacks def get_user_by_access_token(self, token, rights="access"): @@ -694,7 +694,7 @@ class Auth(object): # * The user is a guest user, and has joined the room # else it will throw. 
member_event = yield self.check_user_was_in_room(room_id, user_id) - return (member_event.membership, member_event.event_id) + return member_event.membership, member_event.event_id except AuthError: visibility = yield self.state.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -703,7 +703,7 @@ class Auth(object): visibility and visibility.content["history_visibility"] == "world_readable" ): - return (Membership.JOIN, None) + return Membership.JOIN, None return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 611d285421..9504bfbc70 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet): except HttpResponseException as e: raise e.to_synapse_error() - return (200, result) + return 200, result @defer.inlineCallbacks def on_PUT(self, request, user_id): yield self.auth.get_user_by_req(request) - return (200, {}) + return 200, {} class KeyUploadServlet(RestServlet): @@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet): self.main_uri + request.uri.decode("ascii"), body, headers=headers ) - return (200, result) + return 200, result else: # Just interested in counts. result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - return (200, {"one_time_key_counts": result}) + return 200, {"one_time_key_counts": result} class FrontendProxySlavedStore( diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 41eabbe717..694fb2c816 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -83,7 +83,7 @@ def compute_content_hash(event_dict, hash_algorithm): event_json_bytes = encode_canonical_json(event_dict) hashed = hash_algorithm(event_json_bytes) - return (hashed.name, hashed.digest()) + return hashed.name, hashed.digest() def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256): @@ -106,7 +106,7 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256): event_dict.pop("unsigned", None) event_json_bytes = encode_canonical_json(event_dict) hashed = hash_algorithm(event_json_bytes) - return (hashed.name, hashed.digest()) + return hashed.name, hashed.digest() def compute_event_signature(event_dict, signature_name, signing_key): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index bec3080895..6ee6216660 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -355,7 +355,7 @@ class FederationClient(FederationBase): auth_chain.sort(key=lambda e: e.depth) - return (pdus, auth_chain) + return pdus, auth_chain except HttpResponseException as e: if e.code == 400 or e.code == 404: logger.info("Failed to use get_room_state_ids API, falling back") @@ -404,7 +404,7 @@ class FederationClient(FederationBase): signed_auth.sort(key=lambda e: e.depth) - return (signed_pdus, signed_auth) + return signed_pdus, signed_auth @defer.inlineCallbacks def get_events_from_store_or_dest(self, destination, room_id, event_ids): @@ -429,7 +429,7 @@ class FederationClient(FederationBase): missing_events.discard(k) if not missing_events: - return (signed_events, failed_to_fetch) + return signed_events, failed_to_fetch logger.debug( "Fetching unknown state/auth events %s for room %s", @@ -465,7 +465,7 @@ class FederationClient(FederationBase): # We removed all events we successfully fetched from `batch` 
failed_to_fetch.update(batch) - return (signed_events, failed_to_fetch) + return signed_events, failed_to_fetch @defer.inlineCallbacks @log_function diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 05fd49f3c1..e5f0b90aec 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -100,7 +100,7 @@ class FederationServer(FederationBase): res = self._transaction_from_pdus(pdus).get_dict() - return (200, res) + return 200, res @defer.inlineCallbacks @log_function @@ -163,7 +163,7 @@ class FederationServer(FederationBase): yield self.transaction_actions.set_response( origin, transaction, 400, response ) - return (400, response) + return 400, response received_pdus_counter.inc(len(transaction.pdus)) @@ -265,7 +265,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response(origin, transaction, 200, response) - return (200, response) + return 200, response @defer.inlineCallbacks def received_edu(self, origin, edu_type, content): @@ -298,7 +298,7 @@ class FederationServer(FederationBase): event_id, ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_state_ids_request(self, origin, room_id, event_id): @@ -315,7 +315,7 @@ class FederationServer(FederationBase): state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids) - return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) + return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids} @defer.inlineCallbacks def _on_context_state_request_compute(self, room_id, event_id): @@ -345,15 +345,15 @@ class FederationServer(FederationBase): pdu = yield self.handler.get_persisted_pdu(origin, event_id) if pdu: - return (200, self._transaction_from_pdus([pdu]).get_dict()) + return 200, self._transaction_from_pdus([pdu]).get_dict() else: - return (404, "") + return 404, "" @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_make_join_request(self, origin, room_id, user_id, supported_versions): @@ -435,7 +435,7 @@ class FederationServer(FederationBase): logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) yield self.handler.on_send_leave_request(origin, pdu) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_event_auth(self, origin, room_id, event_id): @@ -446,7 +446,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() auth_pdus = yield self.handler.on_event_auth(event_id) res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} - return (200, res) + return 200, res @defer.inlineCallbacks def on_query_auth_request(self, origin, content, room_id, event_id): @@ -499,7 +499,7 @@ class FederationServer(FederationBase): "missing": ret.get("missing", []), } - return (200, send_content) + return 200, send_content @log_function def on_query_client_keys(self, origin, content): diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 8acd9f9a83..38bc67191c 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -51,8 +51,8 @@ class AccountDataEventSource(object): {"type": account_data_type, "content": content, "room_id": room_id} ) - return (results, current_stream_id) + 
return results, current_stream_id @defer.inlineCallbacks def get_pagination_rows(self, user, config, key): - return ([], config.to_id) + return [], config.to_id diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0f3ebf7ef8..f844409d21 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -280,7 +280,7 @@ class AuthHandler(BaseHandler): creds, list(clientdict), ) - return (creds, clientdict, session["id"]) + return creds, clientdict, session["id"] ret = self._auth_dict_for_flows(flows, session) ret["completed"] = list(creds) @@ -722,7 +722,7 @@ class AuthHandler(BaseHandler): known_login_type = True is_valid = yield provider.check_password(qualified_user_id, password) if is_valid: - return (qualified_user_id, None) + return qualified_user_id, None if not hasattr(provider, "get_supported_login_types") or not hasattr( provider, "check_auth" @@ -766,7 +766,7 @@ class AuthHandler(BaseHandler): ) if canonical_user_id: - return (canonical_user_id, None) + return canonical_user_id, None if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) @@ -816,7 +816,7 @@ class AuthHandler(BaseHandler): result = (result, None) return result - return (None, None) + return None, None @defer.inlineCallbacks def _check_local_password(self, user_id, password): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 94306c94a9..538b16efd6 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1428,7 +1428,7 @@ class FederationHandler(BaseHandler): assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id - return (origin, event, format_ver) + return origin, event, format_ver @defer.inlineCallbacks @log_function diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 42d6650ed9..595f75400b 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -449,7 +449,7 @@ class InitialSyncHandler(BaseHandler): # * The user is a guest user, and has joined the room # else it will throw. member_event = yield self.auth.check_user_was_in_room(room_id, user_id) - return (member_event.membership, member_event.event_id) + return member_event.membership, member_event.event_id return except AuthError: visibility = yield self.state_handler.get_current_state( @@ -459,7 +459,7 @@ class InitialSyncHandler(BaseHandler): visibility and visibility.content["history_visibility"] == "world_readable" ): - return (Membership.JOIN, None) + return Membership.JOIN, None return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 94a9ca0357..8377a0ddc2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1032,7 +1032,7 @@ class PresenceEventSource(object): # # Hence this guard where we just return nothing so that the sync # doesn't return. C.f. #5503. 
- return ([], max_token) + return [], max_token presence = self.get_presence_handler() stream_change_cache = self.store.presence_stream_cache @@ -1279,7 +1279,7 @@ def get_interested_parties(store, states): # Always notify self users_to_states.setdefault(state.user_id, []).append(state) - return (room_ids_to_states, users_to_states) + return room_ids_to_states, users_to_states @defer.inlineCallbacks diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 73973502a4..6854c751a6 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -148,7 +148,7 @@ class ReceiptEventSource(object): to_key = yield self.get_current_key() if from_key == to_key: - return ([], to_key) + return [], to_key events = yield self.store.get_linearized_receipts_for_rooms( room_ids, from_key=from_key, to_key=to_key diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4631fab94e..be0425a33b 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -622,7 +622,7 @@ class RegistrationHandler(BaseHandler): initial_display_name=initial_display_name, is_guest=is_guest, ) - return (r["device_id"], r["access_token"]) + return r["device_id"], r["access_token"] valid_until_ms = None if self.session_lifetime is not None: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 249a6d9c5d..f03a2bd540 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -903,7 +903,7 @@ class RoomMemberHandler(object): if not public_keys: public_keys.append(fallback_public_key) display_name = data["display_name"] - return (token, public_keys, fallback_public_key, display_name) + return token, public_keys, fallback_public_key, display_name @defer.inlineCallbacks def _is_host_in_room(self, current_state_ids): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ef7f2ca980..d582f8e494 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -378,7 +378,7 @@ class SyncHandler(object): event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - return (now_token, ephemeral_by_room) + return now_token, ephemeral_by_room @defer.inlineCallbacks def _load_filtered_recents( @@ -1332,7 +1332,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - return ([], [], [], []) + return [], [], [], [] ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1642,7 +1642,7 @@ class SyncHandler(object): ) room_entries.append(entry) - return (room_entries, invited, newly_joined_rooms, newly_left_rooms) + return room_entries, invited, newly_joined_rooms, newly_left_rooms @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builder, ignored_users): @@ -1716,7 +1716,7 @@ class SyncHandler(object): ) ) - return (room_entries, invited, []) + return room_entries, invited, [] @defer.inlineCallbacks def _generate_room_entry( diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index f882330293..ca8ae9fb5b 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -319,4 +319,4 @@ class TypingNotificationEventSource(object): return self.get_typing_handler()._latest_room_serial def get_pagination_rows(self, user, pagination_config, key): - return ([], pagination_config.from_key) + return [], pagination_config.from_key diff --git a/synapse/http/federation/well_known_resolver.py 
b/synapse/http/federation/well_known_resolver.py index 5e9b0befb0..7ddfad286d 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -207,7 +207,7 @@ class WellKnownResolver(object): cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID, ) - return (result, cache_period) + return result, cache_period @defer.inlineCallbacks def _make_well_known_request(self, server_name, retry): diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 41147d4292..735b882363 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -101,7 +101,7 @@ class ModuleApi(object): ) user_id = yield self.register_user(localpart, displayname, emails) _, access_token = yield self.register_device(user_id) - return (user_id, access_token) + return user_id, access_token def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. diff --git a/synapse/notifier.py b/synapse/notifier.py index bd80c801b6..4e091314e6 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -472,11 +472,11 @@ class Notifier(object): joined_room_ids = yield self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: - return ([explicit_room_id], True) + return [explicit_room_id], True if (yield self._is_world_readable(explicit_room_id)): - return ([explicit_room_id], False) + return [explicit_room_id], False raise AuthError(403, "Non-joined access not allowed") - return (joined_room_ids, True) + return joined_room_ids, True @defer.inlineCallbacks def _is_world_readable(self, room_id): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c831975635..22491f3700 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object): pl_event = auth_events.get(POWER_KEY) - return (pl_event.content if pl_event else {}, sender_level) + return pl_event.content if pl_event else {}, sender_level @defer.inlineCallbacks def action_for_event_by_user(self, event, context): diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index fed4f08820..2f16955954 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_and_contexts, backfilled ) - return (200, {}) + return 200, {} class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): @@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): result = yield self.registry.on_edu(edu_type, origin, edu_content) - return (200, result) + return 200, result class ReplicationGetQueryRestServlet(ReplicationEndpoint): @@ -204,7 +204,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): result = yield self.registry.on_query(query_type, args) - return (200, result) + return 200, result class ReplicationCleanRoomRestServlet(ReplicationEndpoint): @@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): def _handle_request(self, request, room_id): yield self.store.clean_room_for_join(room_id) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index f17d3a2da4..786f5232b2 100644 --- 
a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): user_id, device_id, initial_display_name, is_guest ) - return (200, {"device_id": device_id, "access_token": access_token}) + return 200, {"device_id": device_id, "access_token": access_token} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 4217335d88..b9ce3477ad 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): remote_room_hosts, room_id, user_id, event_content ) - return (200, {}) + return 200, {} class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): @@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): yield self.store.locally_reject_invite(user_id, room_id) ret = {} - return (200, ret) + return 200, ret class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): @@ -202,7 +202,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): else: raise Exception("Unrecognized change: %r", change) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 3341320a87..87fe2dd9b0 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): address=content["address"], ) - return (200, {}) + return 200, {} class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): @@ -143,7 +143,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): bind_msisdn=bind_msisdn, ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index eff7bd7305..adb9b2f7f4 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index c10b85d2ff..f03111c259 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -158,7 +158,7 @@ class Stream(object): updates, current_token = yield self.get_updates_since(self.last_token) self.last_token = current_token - return (updates, current_token) + return updates, current_token @defer.inlineCallbacks def get_updates_since(self, from_token): @@ -172,14 +172,14 @@ class Stream(object): sent over the replication steam. 
""" if from_token in ("NOW", "now"): - return ([], self.upto_token) + return [], self.upto_token current_token = self.upto_token from_token = int(from_token) if from_token == current_token: - return ([], current_token) + return [], current_token if self._LIMITED: rows = yield self.update_function( @@ -198,7 +198,7 @@ class Stream(object): if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND: raise Exception("stream %s has fallen behind" % (self.NAME)) - return (updates, current_token) + return updates, current_token def current_token(self): """Gets the current token of the underlying streams. Should be provided diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index fa91cc8dee..b4761adaed 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -69,7 +69,7 @@ class UsersRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_users() - return (200, ret) + return 200, ret class VersionServlet(RestServlet): @@ -120,7 +120,7 @@ class UserRegisterServlet(RestServlet): nonce = self.hs.get_secrets().token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) - return (200, {"nonce": nonce}) + return 200, {"nonce": nonce} @defer.inlineCallbacks def on_POST(self, request): @@ -212,7 +212,7 @@ class UserRegisterServlet(RestServlet): ) result = yield register._create_registration_details(user_id, body) - return (200, result) + return 200, result class WhoisRestServlet(RestServlet): @@ -237,7 +237,7 @@ class WhoisRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_whois(target_user) - return (200, ret) + return 200, ret class PurgeHistoryRestServlet(RestServlet): @@ -322,7 +322,7 @@ class PurgeHistoryRestServlet(RestServlet): room_id, token, delete_local_events=delete_local_events ) - return (200, {"purge_id": purge_id}) + return 200, {"purge_id": purge_id} class PurgeHistoryStatusRestServlet(RestServlet): @@ -347,7 +347,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): if purge_status is None: raise NotFoundError("purge id '%s' not found" % purge_id) - return (200, purge_status.asdict()) + return 200, purge_status.asdict() class DeactivateAccountRestServlet(RestServlet): @@ -379,7 +379,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - return (200, {"id_server_unbind_result": id_server_unbind_result}) + return 200, {"id_server_unbind_result": id_server_unbind_result} class ShutdownRoomRestServlet(RestServlet): @@ -549,7 +549,7 @@ class ResetPasswordRestServlet(RestServlet): yield self._set_password_handler.set_password( target_user_id, new_password, requester ) - return (200, {}) + return 200, {} class GetUsersPaginatedRestServlet(RestServlet): @@ -591,7 +591,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - return (200, ret) + return 200, ret @defer.inlineCallbacks def on_POST(self, request, target_user_id): @@ -619,7 +619,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - return (200, ret) + return 200, ret class SearchUsersRestServlet(RestServlet): @@ -662,7 +662,7 @@ class SearchUsersRestServlet(RestServlet): logger.info("term: %s ", term) ret = yield self.handlers.admin_handler.search_users(term) - return (200, ret) + return 200, ret class DeleteGroupAdminRestServlet(RestServlet): @@ 
-685,7 +685,7 @@ class DeleteGroupAdminRestServlet(RestServlet): raise SynapseError(400, "Can only delete local groups") yield self.group_server.delete_group(group_id, requester.user.to_string()) - return (200, {}) + return 200, {} class AccountValidityRenewServlet(RestServlet): @@ -716,7 +716,7 @@ class AccountValidityRenewServlet(RestServlet): ) res = {"expiration_ts": expiration_ts} - return (200, res) + return 200, res ######################################################################################## diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 824df919f2..f3f63f0be7 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -49,7 +49,7 @@ class QuarantineMediaInRoom(RestServlet): room_id, requester.user.to_string() ) - return (200, {"num_quarantined": num_quarantined}) + return 200, {"num_quarantined": num_quarantined} class ListMediaInRoom(RestServlet): @@ -70,7 +70,7 @@ class ListMediaInRoom(RestServlet): local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) - return (200, {"local": local_mxcs, "remote": remote_mxcs}) + return 200, {"local": local_mxcs, "remote": remote_mxcs} class PurgeMediaCacheRestServlet(RestServlet): @@ -89,7 +89,7 @@ class PurgeMediaCacheRestServlet(RestServlet): ret = yield self.media_repository.delete_old_remote_media(before_ts) - return (200, ret) + return 200, ret def register_servlets_for_media_repo(hs, http_server): diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py index 2922eb543e..f474066542 100644 --- a/synapse/rest/admin/purge_room_servlet.py +++ b/synapse/rest/admin/purge_room_servlet.py @@ -54,4 +54,4 @@ class PurgeRoomServlet(RestServlet): await self.pagination_handler.purge_room(body["room_id"]) - return (200, {}) + return 200, {} diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 656526fea5..ae2cbe2e0a 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -92,7 +92,7 @@ class SendServerNoticeServlet(RestServlet): event_content=body["content"], ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} def on_PUT(self, request, txn_id): return self.txns.fetch_or_execute_request( diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 5364117420..9720a3bab0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -71,7 +71,7 @@ class UserAdminServlet(RestServlet): is_admin = yield self.handlers.admin_handler.get_user_server_admin(target_user) is_admin = bool(is_admin) - return (200, {"admin": is_admin}) + return 200, {"admin": is_admin} @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -97,4 +97,4 @@ class UserAdminServlet(RestServlet): target_user, set_admin_to ) - return (200, {}) + return 200, {} diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 4284738021..4ea3666874 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet): dir_handler = self.handlers.directory_handler res = yield dir_handler.get_association(room_alias) - return (200, res) + return 200, res @defer.inlineCallbacks def on_PUT(self, request, room_alias): @@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet): requester, room_alias, room_id, servers ) - return (200, {}) + return 200, {} @defer.inlineCallbacks def 
on_DELETE(self, request, room_alias): @@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet): service.url, room_alias.to_string(), ) - return (200, {}) + return 200, {} except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass @@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet): "User %s deleted alias %s", user.to_string(), room_alias.to_string() ) - return (200, {}) + return 200, {} class ClientDirectoryListServer(RestServlet): @@ -136,7 +136,7 @@ class ClientDirectoryListServer(RestServlet): if room is None: raise NotFoundError("Unknown room") - return (200, {"visibility": "public" if room["is_public"] else "private"}) + return 200, {"visibility": "public" if room["is_public"] else "private"} @defer.inlineCallbacks def on_PUT(self, request, room_id): @@ -149,7 +149,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, visibility ) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_DELETE(self, request, room_id): @@ -159,7 +159,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, "private" ) - return (200, {}) + return 200, {} class ClientAppserviceDirectoryListServer(RestServlet): @@ -193,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet): requester.app_service.id, network_id, room_id, visibility ) - return (200, {}) + return 200, {} diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 53ebed2203..6651b4cf07 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -67,10 +67,10 @@ class EventStreamRestServlet(RestServlet): is_guest=is_guest, ) - return (200, chunk) + return 200, chunk def on_OPTIONS(self, request): - return (200, {}) + return 200, {} # TODO: Unit test gets, with and without auth, with different kinds of events. @@ -91,9 +91,9 @@ class EventRestServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - return (200, event) + return 200, event else: - return (404, "Event not found.") + return 404, "Event not found." 
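The rewrite in this commit is purely mechanical. A self-contained sketch, separate from the patch itself, of why the two spellings are interchangeable:

    def handler_with_parens():
        return (200, {"ok": True})

    def handler_without_parens():
        return 200, {"ok": True}

    # The comma builds the tuple; the parentheses are only grouping, so both
    # functions return the identical (status, body) pair.
    assert handler_with_parens() == handler_without_parens() == (200, {"ok": True})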
def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 70b8478e90..2da3cd7511 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet): include_archived=include_archived, ) - return (200, content) + return 200, content def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 5762b9fd06..25a1b67092 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -121,10 +121,10 @@ class LoginRestServlet(RestServlet): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) - return (200, {"flows": flows}) + return 200, {"flows": flows} def on_OPTIONS(self, request): - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_POST(self, request): @@ -152,7 +152,7 @@ class LoginRestServlet(RestServlet): well_known_data = self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data - return (200, result) + return 200, result @defer.inlineCallbacks def _do_other_login(self, login_submission): diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 2769f3a189..4785a34d75 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -33,7 +33,7 @@ class LogoutRestServlet(RestServlet): self._device_handler = hs.get_device_handler() def on_OPTIONS(self, request): - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_POST(self, request): @@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet): requester.user.to_string(), requester.device_id ) - return (200, {}) + return 200, {} class LogoutAllRestServlet(RestServlet): @@ -62,7 +62,7 @@ class LogoutAllRestServlet(RestServlet): self._device_handler = hs.get_device_handler() def on_OPTIONS(self, request): - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_POST(self, request): @@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet): # .. and then delete any access tokens which weren't associated with # devices. 
yield self._auth_handler.delete_access_tokens_for_user(user_id) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 1eb1068c98..0153525cef 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet): state = yield self.presence_handler.get_state(target_user=user) state = format_user_presence_state(state, self.clock.time_msec()) - return (200, state) + return 200, state @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -88,10 +88,10 @@ class PresenceStatusRestServlet(RestServlet): if self.hs.config.use_presence: yield self.presence_handler.set_state(user, state) - return (200, {}) + return 200, {} def on_OPTIONS(self, request): - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 2657ae45bb..bbce2e2b71 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -48,7 +48,7 @@ class ProfileDisplaynameRestServlet(RestServlet): if displayname is not None: ret["displayname"] = displayname - return (200, ret) + return 200, ret @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -61,14 +61,14 @@ class ProfileDisplaynameRestServlet(RestServlet): try: new_name = content["displayname"] except Exception: - return (400, "Unable to parse name") + return 400, "Unable to parse name" yield self.profile_handler.set_displayname(user, requester, new_name, is_admin) - return (200, {}) + return 200, {} def on_OPTIONS(self, request, user_id): - return (200, {}) + return 200, {} class ProfileAvatarURLRestServlet(RestServlet): @@ -98,7 +98,7 @@ class ProfileAvatarURLRestServlet(RestServlet): if avatar_url is not None: ret["avatar_url"] = avatar_url - return (200, ret) + return 200, ret @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -110,14 +110,14 @@ class ProfileAvatarURLRestServlet(RestServlet): try: new_name = content["avatar_url"] except Exception: - return (400, "Unable to parse name") + return 400, "Unable to parse name" yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin) - return (200, {}) + return 200, {} def on_OPTIONS(self, request, user_id): - return (200, {}) + return 200, {} class ProfileRestServlet(RestServlet): @@ -150,7 +150,7 @@ class ProfileRestServlet(RestServlet): if avatar_url is not None: ret["avatar_url"] = avatar_url - return (200, ret) + return 200, ret def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index c3ae8b98a8..9f8c3d09e3 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet): if "attr" in spec: yield self.set_rule_attr(user_id, spec, content) self.notify_user(user_id) - return (200, {}) + return 200, {} if spec["rule_id"].startswith("."): # Rule ids starting with '.' are reserved for server default rules. 
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet): except RuleNotFoundException as e: raise SynapseError(400, str(e)) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_DELETE(self, request, path): @@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet): try: yield self.store.delete_push_rule(user_id, namespaced_rule_id) self.notify_user(user_id) - return (200, {}) + return 200, {} except StoreError as e: if e.code == 404: raise NotFoundError() @@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet): ) if path[0] == "": - return (200, rules) + return 200, rules elif path[0] == "global": result = _filter_ruleset_with_path(rules["global"], path[1:]) - return (200, result) + return 200, result else: raise UnrecognizedRequestError() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index ebc3dec516..41660682d9 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet): if k not in allowed_keys: del p[k] - return (200, {"pushers": pushers}) + return 200, {"pushers": pushers} def on_OPTIONS(self, _): return 200, {} @@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet): yield self.pusher_pool.remove_pusher( content["app_id"], content["pushkey"], user_id=user.to_string() ) - return (200, {}) + return 200, {} assert_params_in_dict( content, @@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet): self.notifier.on_new_replication_data() - return (200, {}) + return 200, {} def on_OPTIONS(self, _): return 200, {} diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 4b2344e696..f244e8f469 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -91,14 +91,14 @@ class RoomCreateRestServlet(TransactionRestServlet): requester, self.get_room_config(request) ) - return (200, info) + return 200, info def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) return user_supplied_config def on_OPTIONS(self, request): - return (200, {}) + return 200, {} # TODO: Needs unit testing for generic events @@ -173,9 +173,9 @@ class RoomStateEventRestServlet(TransactionRestServlet): if format == "event": event = format_event_for_client_v2(data.get_dict()) - return (200, event) + return 200, event elif format == "content": - return (200, data.get_dict()["content"]) + return 200, data.get_dict()["content"] @defer.inlineCallbacks def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): @@ -210,7 +210,7 @@ class RoomStateEventRestServlet(TransactionRestServlet): ret = {} if event: ret = {"event_id": event.event_id} - return (200, ret) + return 200, ret # TODO: Needs unit testing for generic events + feedback @@ -244,10 +244,10 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester, event_dict, txn_id=txn_id ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} def on_GET(self, request, room_id, event_type, txn_id): - return (200, "Not implemented") + return 200, "Not implemented" def on_PUT(self, request, room_id, event_type, txn_id): return self.txns.fetch_or_execute_request( @@ -307,7 +307,7 @@ class JoinRoomAliasServlet(TransactionRestServlet): third_party_signed=content.get("third_party_signed", None), ) - return (200, {"room_id": room_id}) + return 200, {"room_id": room_id} def on_PUT(self, request, room_identifier, txn_id): return self.txns.fetch_or_execute_request( @@ -360,7 +360,7 @@ class 
PublicRoomListRestServlet(TransactionRestServlet): limit=limit, since_token=since_token ) - return (200, data) + return 200, data @defer.inlineCallbacks def on_POST(self, request): @@ -405,7 +405,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): network_tuple=network_tuple, ) - return (200, data) + return 200, data # TODO: Needs unit testing @@ -456,7 +456,7 @@ class RoomMemberListRestServlet(RestServlet): continue chunk.append(event) - return (200, {"chunk": chunk}) + return 200, {"chunk": chunk} # deprecated in favour of /members?membership=join? @@ -477,7 +477,7 @@ class JoinedRoomMemberListRestServlet(RestServlet): requester, room_id ) - return (200, {"joined": users_with_profile}) + return 200, {"joined": users_with_profile} # TODO: Needs better unit testing @@ -510,7 +510,7 @@ class RoomMessageListRestServlet(RestServlet): event_filter=event_filter, ) - return (200, msgs) + return 200, msgs # TODO: Needs unit testing @@ -531,7 +531,7 @@ class RoomStateRestServlet(RestServlet): user_id=requester.user.to_string(), is_guest=requester.is_guest, ) - return (200, events) + return 200, events # TODO: Needs unit testing @@ -550,7 +550,7 @@ class RoomInitialSyncRestServlet(RestServlet): content = yield self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) - return (200, content) + return 200, content class RoomEventServlet(RestServlet): @@ -581,7 +581,7 @@ class RoomEventServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - return (200, event) + return 200, event return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) @@ -633,7 +633,7 @@ class RoomEventContextServlet(RestServlet): results["state"], time_now ) - return (200, results) + return 200, results class RoomForgetRestServlet(TransactionRestServlet): @@ -652,7 +652,7 @@ class RoomForgetRestServlet(TransactionRestServlet): yield self.room_member_handler.forget(user=requester.user, room_id=room_id) - return (200, {}) + return 200, {} def on_PUT(self, request, room_id, txn_id): return self.txns.fetch_or_execute_request( @@ -702,7 +702,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): requester, txn_id, ) - return (200, {}) + return 200, {} return target = requester.user @@ -729,7 +729,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): if membership_action == "join": return_value["room_id"] = room_id - return (200, return_value) + return 200, return_value def _has_3pid_invite_keys(self, content): for key in {"id_server", "medium", "address"}: @@ -771,7 +771,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): txn_id=txn_id, ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} def on_PUT(self, request, room_id, event_id, txn_id): return self.txns.fetch_or_execute_request( @@ -816,7 +816,7 @@ class RoomTypingRestServlet(RestServlet): target_user=target_user, auth_user=requester.user, room_id=room_id ) - return (200, {}) + return 200, {} class SearchRestServlet(RestServlet): @@ -838,7 +838,7 @@ class SearchRestServlet(RestServlet): requester.user, content, batch ) - return (200, results) + return 200, results class JoinedRoomsRestServlet(RestServlet): @@ -854,7 +854,7 @@ class JoinedRoomsRestServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) room_ids = yield self.store.get_rooms_for_user(requester.user.to_string()) - return (200, {"joined_rooms": list(room_ids)}) + 
return 200, {"joined_rooms": list(room_ids)} def register_txn_path(servlet, regex_string, http_server, with_get=False): diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 497cddf8b8..2afdbb89e5 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -60,7 +60,7 @@ class VoipRestServlet(RestServlet): password = turnPassword else: - return (200, {}) + return 200, {} return ( 200, @@ -73,7 +73,7 @@ class VoipRestServlet(RestServlet): ) def on_OPTIONS(self, request): - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 934ed5d16d..0620a4d0cf 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -117,7 +117,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Wrap the session id in a JSON object ret = {"sid": sid} - return (200, ret) + return 200, ret @defer.inlineCallbacks def send_password_reset(self, email, client_secret, send_attempt, next_link=None): @@ -221,7 +221,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND) ret = yield self.identity_handler.requestMsisdnToken(**body) - return (200, ret) + return 200, ret class PasswordResetSubmitTokenServlet(RestServlet): @@ -330,7 +330,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): ) response_code = 200 if valid else 400 - return (response_code, {"success": valid}) + return response_code, {"success": valid} class PasswordRestServlet(RestServlet): @@ -399,7 +399,7 @@ class PasswordRestServlet(RestServlet): yield self._set_password_handler.set_password(user_id, new_password, requester) - return (200, {}) + return 200, {} def on_OPTIONS(self, _): return 200, {} @@ -434,7 +434,7 @@ class DeactivateAccountRestServlet(RestServlet): yield self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase ) - return (200, {}) + return 200, {} yield self.auth_handler.validate_user_via_ui_auth( requester, body, self.hs.get_ip_from_request(request) @@ -447,7 +447,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - return (200, {"id_server_unbind_result": id_server_unbind_result}) + return 200, {"id_server_unbind_result": id_server_unbind_result} class EmailThreepidRequestTokenRestServlet(RestServlet): @@ -481,7 +481,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - return (200, ret) + return 200, ret class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -516,7 +516,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestMsisdnToken(**body) - return (200, ret) + return 200, ret class ThreepidRestServlet(RestServlet): @@ -536,7 +536,7 @@ class ThreepidRestServlet(RestServlet): threepids = yield self.datastore.user_get_threepids(requester.user.to_string()) - return (200, {"threepids": threepids}) + return 200, {"threepids": threepids} @defer.inlineCallbacks def on_POST(self, request): @@ -568,7 +568,7 @@ class ThreepidRestServlet(RestServlet): logger.debug("Binding threepid %s to %s", threepid, user_id) yield self.identity_handler.bind_threepid(threePidCreds, user_id) - return (200, {}) + 
return 200, {} class ThreepidDeleteRestServlet(RestServlet): @@ -603,7 +603,7 @@ class ThreepidDeleteRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - return (200, {"id_server_unbind_result": id_server_unbind_result}) + return 200, {"id_server_unbind_result": id_server_unbind_result} class WhoamiRestServlet(RestServlet): @@ -617,7 +617,7 @@ class WhoamiRestServlet(RestServlet): def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - return (200, {"user_id": requester.user.to_string()}) + return 200, {"user_id": requester.user.to_string()} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 98f2f6f4b5..f0db204ffa 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -55,7 +55,7 @@ class AccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_GET(self, request, user_id, account_data_type): @@ -70,7 +70,7 @@ class AccountDataServlet(RestServlet): if event is None: raise NotFoundError("Account data not found") - return (200, event) + return 200, event class RoomAccountDataServlet(RestServlet): @@ -112,7 +112,7 @@ class RoomAccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_GET(self, request, user_id, room_id, account_data_type): @@ -127,7 +127,7 @@ class RoomAccountDataServlet(RestServlet): if event is None: raise NotFoundError("Room account data not found") - return (200, event) + return 200, event def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index a4fa45fe11..acd58af193 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet): "m.change_password": {"enabled": change_password}, } } - return (200, response) + return 200, response def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 9adf76cc0c..26d0235208 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet): devices = yield self.device_handler.get_devices_by_user( requester.user.to_string() ) - return (200, {"devices": devices}) + return 200, {"devices": devices} class DeleteDevicesRestServlet(RestServlet): @@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet): yield self.device_handler.delete_devices( requester.user.to_string(), body["devices"] ) - return (200, {}) + return 200, {} class DeviceRestServlet(RestServlet): @@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet): device = yield self.device_handler.get_device( requester.user.to_string(), device_id ) - return (200, device) + return 200, device @interactive_auth_handler @defer.inlineCallbacks @@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet): ) yield self.device_handler.delete_device(requester.user.to_string(), device_id) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_PUT(self, request, device_id): @@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet): yield self.device_handler.update_device( requester.user.to_string(), 
device_id, body ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 22be0ee3c5..c6ddf24c8d 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet): user_localpart=target_user.localpart, filter_id=filter_id ) - return (200, filter.get_filter_json()) + return 200, filter.get_filter_json() except (KeyError, StoreError): raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND) @@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet): user_localpart=target_user.localpart, user_filter=content ) - return (200, {"filter_id": str(filter_id)}) + return 200, {"filter_id": str(filter_id)} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index e629c4256d..999a0fa80c 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -47,7 +47,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id ) - return (200, group_description) + return 200, group_description @defer.inlineCallbacks def on_POST(self, request, group_id): @@ -59,7 +59,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id, content ) - return (200, {}) + return 200, {} class GroupSummaryServlet(RestServlet): @@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet): group_id, requester_user_id ) - return (200, get_group_summary) + return 200, get_group_summary class GroupSummaryRoomsCatServlet(RestServlet): @@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): content=content, ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id, room_id): @@ -131,7 +131,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): group_id, requester_user_id, room_id=room_id, category_id=category_id ) - return (200, resp) + return 200, resp class GroupCategoryServlet(RestServlet): @@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - return (200, category) + return 200, category @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id): @@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id, content=content ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id): @@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - return (200, resp) + return 200, resp class GroupCategoriesServlet(RestServlet): @@ -204,7 +204,7 @@ class GroupCategoriesServlet(RestServlet): group_id, requester_user_id ) - return (200, category) + return 200, category class GroupRoleServlet(RestServlet): @@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - return (200, category) + return 200, category @defer.inlineCallbacks def on_PUT(self, request, group_id, role_id): @@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id, content=content ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id): @@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - return (200, resp) + return 200, resp 
class GroupRolesServlet(RestServlet): @@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet): group_id, requester_user_id ) - return (200, category) + return 200, category class GroupSummaryUsersRoleServlet(RestServlet): @@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): content=content, ) - return (200, resp) + return 200, resp @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id, user_id): @@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): group_id, requester_user_id, user_id=user_id, role_id=role_id ) - return (200, resp) + return 200, resp class GroupRoomServlet(RestServlet): @@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet): group_id, requester_user_id ) - return (200, result) + return 200, result class GroupUsersServlet(RestServlet): @@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet): group_id, requester_user_id ) - return (200, result) + return 200, result class GroupInvitedUsersServlet(RestServlet): @@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet): group_id, requester_user_id ) - return (200, result) + return 200, result class GroupSettingJoinPolicyServlet(RestServlet): @@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupCreateServlet(RestServlet): @@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupAdminRoomsServlet(RestServlet): @@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id, content ) - return (200, result) + return 200, result @defer.inlineCallbacks def on_DELETE(self, request, group_id, room_id): @@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id ) - return (200, result) + return 200, result class GroupAdminRoomsConfigServlet(RestServlet): @@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): group_id, requester_user_id, room_id, config_key, content ) - return (200, result) + return 200, result class GroupAdminUsersInviteServlet(RestServlet): @@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet): group_id, user_id, requester_user_id, config ) - return (200, result) + return 200, result class GroupAdminUsersKickServlet(RestServlet): @@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet): group_id, user_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfLeaveServlet(RestServlet): @@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet): group_id, requester_user_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfJoinServlet(RestServlet): @@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfAcceptInviteServlet(RestServlet): @@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): group_id, requester_user_id, content ) - return (200, result) + return 200, result class GroupSelfUpdatePublicityServlet(RestServlet): @@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): publicise = content["publicise"] yield self.store.update_group_publicity(group_id, requester_user_id, publicise) - return (200, {}) + return 200, {} class PublicisedGroupsForUserServlet(RestServlet): @@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet): 
result = yield self.groups_handler.get_publicised_groups_for_user(user_id) - return (200, result) + return 200, result class PublicisedGroupsForUsersServlet(RestServlet): @@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): result = yield self.groups_handler.bulk_get_publicised_groups(user_ids) - return (200, result) + return 200, result class GroupsForUserServlet(RestServlet): @@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_joined_groups(requester_user_id) - return (200, result) + return 200, result def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index b218a3f334..64b6898eb8 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -105,7 +105,7 @@ class KeyUploadServlet(RestServlet): result = yield self.e2e_keys_handler.upload_keys_for_user( user_id, device_id, body ) - return (200, result) + return 200, result class KeyQueryServlet(RestServlet): @@ -159,7 +159,7 @@ class KeyQueryServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.query_devices(body, timeout) - return (200, result) + return 200, result class KeyChangesServlet(RestServlet): @@ -200,7 +200,7 @@ class KeyChangesServlet(RestServlet): results = yield self.device_handler.get_user_ids_changed(user_id, from_token) - return (200, results) + return 200, results class OneTimeKeyServlet(RestServlet): @@ -235,7 +235,7 @@ class OneTimeKeyServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout) - return (200, result) + return 200, result def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index d034863a3c..10c1ad5b07 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -88,7 +88,7 @@ class NotificationsServlet(RestServlet): returned_push_actions.append(returned_pa) next_token = str(pa["stream_ordering"]) - return (200, {"notifications": returned_push_actions, "next_token": next_token}) + return 200, {"notifications": returned_push_actions, "next_token": next_token} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index d93d6a9f24..b3bf8567e1 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet): event_id=read_marker_event_id, ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 98a97b7059..0dab03d227 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet): room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 9510a1e2b0..65f9fce2ff 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ 
b/synapse/rest/client/v2_alpha/register.py @@ -94,7 +94,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - return (200, ret) + return 200, ret class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -137,7 +137,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): ) ret = yield self.identity_handler.requestMsisdnToken(**body) - return (200, ret) + return 200, ret class UsernameAvailabilityRestServlet(RestServlet): @@ -177,7 +177,7 @@ class UsernameAvailabilityRestServlet(RestServlet): yield self.registration_handler.check_username(username) - return (200, {"available": True}) + return 200, {"available": True} class RegisterRestServlet(RestServlet): @@ -279,7 +279,7 @@ class RegisterRestServlet(RestServlet): result = yield self._do_appservice_registration( desired_username, access_token, body ) - return (200, result) # we throw for non 200 responses + return 200, result # we throw for non 200 responses return # for regular registration, downcase the provided username before @@ -487,7 +487,7 @@ class RegisterRestServlet(RestServlet): bind_msisdn=params.get("bind_msisdn"), ) - return (200, return_dict) + return 200, return_dict def on_OPTIONS(self, _): return 200, {} diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 1538b247e5..040b37c504 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -118,7 +118,7 @@ class RelationSendServlet(RestServlet): requester, event_dict=event_dict, txn_id=txn_id ) - return (200, {"event_id": event.event_id}) + return 200, {"event_id": event.event_id} class RelationPaginationServlet(RestServlet): @@ -198,7 +198,7 @@ class RelationPaginationServlet(RestServlet): return_value["chunk"] = events return_value["original_event"] = original_event - return (200, return_value) + return 200, return_value class RelationAggregationPaginationServlet(RestServlet): @@ -270,7 +270,7 @@ class RelationAggregationPaginationServlet(RestServlet): to_token=to_token, ) - return (200, pagination_chunk.to_dict()) + return 200, pagination_chunk.to_dict() class RelationAggregationGroupPaginationServlet(RestServlet): @@ -356,7 +356,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): return_value = result.to_dict() return_value["chunk"] = events - return (200, return_value) + return 200, return_value def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 3fdd4584a3..e7449864cd 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet): received_ts=self.clock.time_msec(), ) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 10dec96208..df4f44cd36 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet): body = {"rooms": {room_id: body}} yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): @@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet): else: room_keys 
= room_keys["rooms"][room_id] - return (200, room_keys) + return 200, room_keys @defer.inlineCallbacks def on_DELETE(self, request, room_id, session_id): @@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet): yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id ) - return (200, {}) + return 200, {} class RoomKeysNewVersionServlet(RestServlet): @@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet): info = parse_json_object_from_request(request) new_version = yield self.e2e_room_keys_handler.create_version(user_id, info) - return (200, {"version": new_version}) + return 200, {"version": new_version} # we deliberately don't have a PUT /version, as these things really should # be immutable to avoid people footgunning @@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet): except SynapseError as e: if e.code == 404: raise SynapseError(404, "No backup found", Codes.NOT_FOUND) - return (200, info) + return 200, info @defer.inlineCallbacks def on_DELETE(self, request, version): @@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet): user_id = requester.user.to_string() yield self.e2e_room_keys_handler.delete_version(user_id, version) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_PUT(self, request, version): @@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet): ) yield self.e2e_room_keys_handler.update_version(user_id, version, info) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 14ba61a63e..d2c3316eb7 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet): ret = {"replacement_room": new_room_id} - return (200, ret) + return 200, ret def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 7b32dd2212..c98c5a3802 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet): time_now, sync_result, requester.access_token_id, filter ) - return (200, response_content) + return 200, response_content @defer.inlineCallbacks def encode_response(self, time_now, sync_result, access_token_id, filter): diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index d173544355..3b555669a0 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -45,7 +45,7 @@ class TagListServlet(RestServlet): tags = yield self.store.get_tags_for_room(user_id, room_id) - return (200, {"tags": tags}) + return 200, {"tags": tags} class TagServlet(RestServlet): @@ -76,7 +76,7 @@ class TagServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} @defer.inlineCallbacks def on_DELETE(self, request, user_id, room_id, tag): @@ -88,7 +88,7 @@ class TagServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - return (200, {}) + return 200, {} def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 158e686b01..2e8d672471 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ 
b/synapse/rest/client/v2_alpha/thirdparty.py @@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) protocols = yield self.appservice_handler.get_3pe_protocols() - return (200, protocols) + return 200, protocols class ThirdPartyProtocolServlet(RestServlet): @@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet): only_protocol=protocol ) if protocol in protocols: - return (200, protocols[protocol]) + return 200, protocols[protocol] else: - return (404, {"error": "Unknown protocol"}) + return 404, {"error": "Unknown protocol"} class ThirdPartyUserServlet(RestServlet): @@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet): ThirdPartyEntityKind.USER, protocol, fields ) - return (200, results) + return 200, results class ThirdPartyLocationServlet(RestServlet): @@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet): ThirdPartyEntityKind.LOCATION, protocol, fields ) - return (200, results) + return 200, results def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 7ab2b80e46..2863affbab 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -60,7 +60,7 @@ class UserDirectorySearchRestServlet(RestServlet): user_id = requester.user.to_string() if not self.hs.config.user_directory_search_enabled: - return (200, {"limited": False, "results": []}) + return 200, {"limited": False, "results": []} body = parse_json_object_from_request(request) @@ -76,7 +76,7 @@ class UserDirectorySearchRestServlet(RestServlet): user_id, search_term, limit ) - return (200, results) + return 200, results def register_servlets(hs, http_server): diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index cf5759e9a6..d4ea09260c 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -318,14 +318,14 @@ class MediaRepository(object): responder = yield self.media_storage.fetch_media(file_info) if responder: - return (responder, media_info) + return responder, media_info # Failed to find the file anywhere, lets download it. media_info = yield self._download_remote_file(server_name, media_id, file_id) responder = yield self.media_storage.fetch_media(file_info) - return (responder, media_info) + return responder, media_info @defer.inlineCallbacks def _download_remote_file(self, server_name, media_id, file_id): diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 90d8e6bffe..c995d7e043 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -78,9 +78,9 @@ class Thumbnailer(object): """ if max_width * self.height < max_height * self.width: - return (max_width, (max_width * self.height) // self.width) + return max_width, (max_width * self.height) // self.width else: - return ((max_height * self.width) // self.height, max_height) + return (max_height * self.width) // self.height, max_height def scale(self, width, height, output_type): """Rescales the image to the given dimensions. 
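A note on the sweep of hunks above and below: in Python it is the comma, not the parentheses, that constructs a tuple, so "return (200, data)" and "return 200, data" are the same statement. A minimal standalone sketch of the equivalence (function names and payload invented for illustration):

    # Both functions return the identical (int, dict) tuple; the
    # parentheses in the first form are redundant.
    def with_parens():
        return (200, {"ok": True})

    def without_parens():
        return 200, {"ok": True}

    assert with_parens() == without_parens() == (200, {"ok": True})

The equivalence also holds on the unpacking side, which is why call sites such as "(code, response) = yield func(...)" in tests/utils.py need no corresponding change.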
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 729c097e6d..81c4aff496 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object): if event_id in referenced_events: referenced_events.remove(event.event_id) - return (currently_blocked, referenced_events) + return currently_blocked, referenced_events diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 9fa5b4f3d6..6afbfc0d74 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -90,7 +90,7 @@ class AccountDataWorkerStore(SQLBaseStore): room_data = by_room.setdefault(row["room_id"], {}) room_data[row["account_data_type"]] = json.loads(row["content"]) - return (global_account_data, by_room) + return global_account_data, by_room return self.runInteraction( "get_account_data_for_user", get_account_data_for_user_txn @@ -205,7 +205,7 @@ class AccountDataWorkerStore(SQLBaseStore): ) txn.execute(sql, (last_room_id, current_id, limit)) room_results = txn.fetchall() - return (global_results, room_results) + return global_results, room_results return self.runInteraction( "get_all_updated_account_data_txn", get_updated_account_data_txn @@ -244,13 +244,13 @@ class AccountDataWorkerStore(SQLBaseStore): room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = json.loads(row[2]) - return (global_account_data, account_data_by_room) + return global_account_data, account_data_by_room changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: - return ({}, {}) + return {}, {} return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 05d9c05c3f..36657753cd 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -358,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore( events = yield self.get_events_as_list(event_ids) - return (upper_bound, events) + return upper_bound, events class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore): diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 79bb0ea46d..4dca9de617 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -66,7 +66,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): messages.append(json.loads(row[1])) if len(messages) < limit: stream_pos = current_stream_id - return (messages, stream_pos) + return messages, stream_pos return self.runInteraction( "get_new_messages_for_device", get_new_messages_for_device_txn @@ -157,7 +157,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): messages.append(json.loads(row[1])) if len(messages) < limit: stream_pos = current_stream_id - return (messages, stream_pos) + return messages, stream_pos return self.runInteraction( "get_new_device_msgs_for_remote", diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index e11881161d..76542c512d 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -94,7 +94,7 @@ class DeviceWorkerStore(SQLBaseStore): destination, int(from_stream_id) ) if not has_changed: - return (now_stream_id, []) + return now_stream_id, [] # We retrieve n+1 devices from the list of outbound pokes where n is # our outbound device 
update limit. We then check if the very last @@ -117,7 +117,7 @@ class DeviceWorkerStore(SQLBaseStore): # Return an empty list if there are no updates if not updates: - return (now_stream_id, []) + return now_stream_id, [] # if we have exceeded the limit, we need to exclude any results with the # same stream_id as the last row. @@ -167,13 +167,13 @@ class DeviceWorkerStore(SQLBaseStore): # skip that stream_id and return an empty list, and continue with the next # stream_id next time. if not query_map: - return (stream_id_cutoff, []) + return stream_id_cutoff, [] results = yield self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) - return (now_stream_id, results) + return now_stream_id, results def _get_devices_by_remote_txn( self, txn, destination, from_stream_id, now_stream_id, limit @@ -352,7 +352,7 @@ class DeviceWorkerStore(SQLBaseStore): else: results[user_id] = yield self._get_cached_devices_for_user(user_id) - return (user_ids_not_in_cache, results) + return user_ids_not_in_cache, results @cachedInlineCallbacks(num_args=2, tree=True) def _get_cached_user_device(self, user_id, device_id): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 5a95c36a8b..32050868ff 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -810,7 +810,7 @@ class EventsStore( # If they old and new groups are the same then we don't need to do # anything. if old_state_groups == new_state_groups: - return (None, None) + return None, None if len(new_state_groups) == 1 and len(old_state_groups) == 1: # If we're going from one state group to another, lets check if @@ -827,7 +827,7 @@ class EventsStore( # the current state in memory then lets also return that, # but it doesn't matter if we don't. new_state = state_groups_map.get(new_state_group) - return (new_state, delta_ids) + return new_state, delta_ids # Now that we have calculated new_state_groups we need to get # their state IDs so we can resolve to a single state set. @@ -839,7 +839,7 @@ class EventsStore( if len(new_state_groups) == 1: # If there is only one state group, then we know what the current # state is. - return (state_groups_map[new_state_groups.pop()], None) + return state_groups_map[new_state_groups.pop()], None # Ok, we need to defer to the state handler to resolve our state sets. 
@@ -868,7 +868,7 @@ class EventsStore( state_res_store=StateResolutionStore(self), ) - return (res.state, None) + return res.state, None @defer.inlineCallbacks def _calculate_state_delta(self, room_id, current_state): @@ -891,7 +891,7 @@ class EventsStore( if ev_id != existing_state.get(key) } - return (to_delete, to_insert) + return to_delete, to_insert @log_function def _persist_events_txn( diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 1a0f2d5768..5db6f2d84a 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -90,7 +90,7 @@ class PresenceStore(SQLBaseStore): presence_states, ) - return (stream_orderings[-1], self._presence_id_gen.get_current_token()) + return stream_orderings[-1], self._presence_id_gen.get_current_token() def _update_presence_txn(self, txn, stream_orderings, presence_states): for stream_id, state in zip(stream_orderings, presence_states): diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index b431d24b8a..3e0e834a62 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -133,7 +133,7 @@ class PusherWorkerStore(SQLBaseStore): txn.execute(sql, (last_id, current_id, limit)) deleted = txn.fetchall() - return (updated, deleted) + return updated, deleted return self.runInteraction( "get_all_updated_pushers", get_all_updated_pushers_txn diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 6aa6d98ebb..290ddb30e8 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -478,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore): max_persisted_id = self._receipts_id_gen.get_current_token() - return (stream_id, max_persisted_id) + return stream_id, max_persisted_id def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data): return self.runInteraction( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 856c2ee8d8..490454f19a 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): the chunk of events returned. """ if from_key == to_key: - return ([], from_key) + return [], from_key from_id = RoomStreamToken.parse_stream_token(from_key).stream to_id = RoomStreamToken.parse_stream_token(to_key).stream @@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) if not has_changed: - return ([], from_key) + return [], from_key def f(txn): sql = ( @@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # get. key = from_key - return (ret, key) + return ret, key @defer.inlineCallbacks def get_membership_changes_for_user(self, user_id, from_key, to_key): @@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ # Allow a zero limit here, and no-op. if limit == 0: - return ([], end_token) + return [], end_token end_token = RoomStreamToken.parse(end_token) @@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # We want to return the results in ascending order. 
rows.reverse() - return (rows, token) + return rows, token def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering @@ -783,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): events = yield self.get_events_as_list(event_ids) - return (upper_bound, events) + return upper_bound, events def get_federation_out_pos(self, typ): return self._simple_select_one_onecol( diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index f1c8d99419..cbb0a4810a 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -195,6 +195,6 @@ class ChainedIdGenerator(object): with self._lock: if self._unfinished_ids: stream_id, chained_id = self._unfinished_ids[0] - return (stream_id - 1, chained_id) + return stream_id - 1, chained_id - return (self._current_max, self.chained_generator.get_current_token()) + return self._current_max, self.chained_generator.get_current_token() diff --git a/synapse/streams/config.py b/synapse/streams/config.py index f7f5906a99..02994ab2a5 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -37,7 +37,7 @@ class SourcePaginationConfig(object): self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None def __repr__(self): - return ("StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)") % ( + return "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)" % ( self.from_key, self.to_key, self.direction, diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 0ad0a88165..e10296a5e4 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -283,4 +283,4 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user, requester, displayname, by_admin=True ) - return (user_id, token) + return user_id, token diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index bb867150f4..ab4d7d70d0 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -472,7 +472,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): added_at=now, ) ) - return (user_id, tok) + return user_id, tok def test_manual_email_send_expired_account(self): user_id = self.register_user("kermit", "monkey") diff --git a/tests/server.py b/tests/server.py index c8269619b1..e397ebe8fa 100644 --- a/tests/server.py +++ b/tests/server.py @@ -338,7 +338,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): def get_clock(): clock = ThreadedMemoryReactorClock() hs_clock = Clock(clock) - return (clock, hs_clock) + return clock, hs_clock @attr.s(cmp=False) diff --git a/tests/test_server.py b/tests/test_server.py index 2a7d407c98..98fef21d55 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -57,7 +57,7 @@ class JsonResourceTests(unittest.TestCase): def _callback(request, **kwargs): got_kwargs.update(kwargs) - return (200, kwargs) + return 200, kwargs res = JsonResource(self.homeserver) res.register_paths( diff --git a/tests/test_state.py b/tests/test_state.py index 6d33566f47..610ec9fb46 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -106,7 +106,7 @@ class StateGroupStore(object): } def get_state_group_delta(self, name): - return (None, None) + return None, None def register_events(self, events): for e in events: diff --git a/tests/utils.py b/tests/utils.py index f1eb9a545c..46ef2959f2 100644 
--- a/tests/utils.py +++ b/tests/utils.py @@ -464,7 +464,7 @@ class MockHttpResource(HttpServer): args = [urlparse.unquote(u) for u in matcher.groups()] (code, response) = yield func(mock_request, *args) - return (code, response) + return code, response except CodeMessageException as e: return (e.code, cs_error(e.msg, code=e.errcode)) -- cgit 1.4.1 From 2a012e8a049858fe379599207fac34b6b2e11101 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 30 Aug 2019 17:13:37 +0100 Subject: Revert "Add m.id_access_token flag (#5930)" (#5945) This reverts commit 4765f0cfd95f6160f32c75481651d125f343cd58. --- changelog.d/5930.misc | 1 - synapse/rest/client/versions.py | 7 +------ 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 changelog.d/5930.misc diff --git a/changelog.d/5930.misc b/changelog.d/5930.misc deleted file mode 100644 index 81dcc10e6d..0000000000 --- a/changelog.d/5930.misc +++ /dev/null @@ -1 +0,0 @@ -Add temporary flag to /versions in unstable_features to indicate this Synapse supports receiving id_access_token parameters on calls to identity server-proxying endpoints. \ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index c51c9e617d..0e09191632 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -44,12 +44,7 @@ class VersionsRestServlet(RestServlet): "r0.5.0", ], # as per MSC1497: - "unstable_features": { - "m.lazy_load_members": True, - # as per https://github.com/matrix-org/synapse/issues/5927 - # to be removed in r0.6.0 - "m.id_access_token": True, - }, + "unstable_features": {"m.lazy_load_members": True}, }, ) -- cgit 1.4.1 From cee00a3584970c8f00f86d265df392adb8f216d7 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 2 Sep 2019 05:27:39 -0500 Subject: Update INSTALL.md to say that Python 2 is no longer supported (#5953) Signed-off-by: Aaron Raimist --- INSTALL.md | 4 ++-- changelog.d/5953.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5953.misc diff --git a/INSTALL.md b/INSTALL.md index 5728882460..6bce370ea8 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.5, 3.6, 3.7, or 2.7 +- Python 3.5, 3.6, or 3.7 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org Synapse is written in Python but some of the libraries it uses are written in @@ -421,7 +421,7 @@ If Synapse is not configured with an SMTP server, password reset via email will The easiest way to create a new user is to do so from a client like [Riot](https://riot.im). -Alternatively you can do so from the command line if you have installed via pip. +Alternatively you can do so from the command line if you have installed via pip. This can be done as follows: diff --git a/changelog.d/5953.misc b/changelog.d/5953.misc new file mode 100644 index 0000000000..38e885f42a --- /dev/null +++ b/changelog.d/5953.misc @@ -0,0 +1 @@ +Update INSTALL.md to say that Python 2 is no longer supported. -- cgit 1.4.1 From ce7803b8b030f7359dd1c394d1c874dbfdd79f36 Mon Sep 17 00:00:00 2001 From: L0ric0 Date: Mon, 2 Sep 2019 13:18:41 +0200 Subject: fix thumbnail storage location (#5915) * fix thumbnail storage location Signed-off-by: Lorenz Steinert * Add changelog file. 
Signed-off-by: Lorenz Steinert * Update Changelog Signed-off-by: Lorenz Steinert --- changelog.d/5915.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5915.bugfix diff --git a/changelog.d/5915.bugfix b/changelog.d/5915.bugfix new file mode 100644 index 0000000000..bf5b99fedc --- /dev/null +++ b/changelog.d/5915.bugfix @@ -0,0 +1 @@ +Fix 404 for thumbnail download when `dynamic_thumbnails` is `false` and the thumbnail was dynamically generated. Fix reported by rkfg. diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index d4ea09260c..b972e152a9 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -526,7 +526,7 @@ class MediaRepository(object): try: file_info = FileInfo( server_name=server_name, - file_id=media_id, + file_id=file_id, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, -- cgit 1.4.1 From 36f34e6f3d551ac7f1bcd92771502d8e37722c33 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 2 Sep 2019 18:29:21 +0100 Subject: Remove unused methods from c/s api v1 in register.py (#5963) These methods were part of the v1 C/S API. Remove them as they are no longer used by any code paths. --- changelog.d/5963.misc | 1 + synapse/handlers/register.py | 104 ------------------------------------------- synapse/http/client.py | 34 +------------- 3 files changed, 2 insertions(+), 137 deletions(-) create mode 100644 changelog.d/5963.misc diff --git a/changelog.d/5963.misc b/changelog.d/5963.misc new file mode 100644 index 0000000000..0d6c3c3d65 --- /dev/null +++ b/changelog.d/5963.misc @@ -0,0 +1 @@ +Remove left-over methods from C/S registration API. \ No newline at end of file diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index be0425a33b..3142d85788 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -24,13 +24,11 @@ from synapse.api.errors import ( AuthError, Codes, ConsentNotGivenError, - InvalidCaptchaError, LimitExceededError, RegistrationError, SynapseError, ) from synapse.config.server import is_threepid_reserved -from synapse.http.client import CaptchaServerHttpClient from synapse.http.servlet import assert_params_in_dict from synapse.replication.http.login import RegisterDeviceReplicationServlet from synapse.replication.http.register import ( @@ -39,7 +37,6 @@ from synapse.replication.http.register import ( ) from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer -from synapse.util.threepids import check_3pid_allowed from ._base import BaseHandler @@ -59,7 +56,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.captcha_client = CaptchaServerHttpClient(hs) self.identity_handler = self.hs.get_handlers().identity_handler self.ratelimiter = hs.get_registration_ratelimiter() @@ -362,70 +358,6 @@ class RegistrationHandler(BaseHandler): ) return user_id - @defer.inlineCallbacks - def check_recaptcha(self, ip, private_key, challenge, response): - """ - Checks a recaptcha is correct. 
- - Used only by c/s api v1 - """ - - captcha_response = yield self._validate_captcha( - ip, private_key, challenge, response - ) - if not captcha_response["valid"]: - logger.info( - "Invalid captcha entered from %s. Error: %s", - ip, - captcha_response["error_url"], - ) - raise InvalidCaptchaError(error_url=captcha_response["error_url"]) - else: - logger.info("Valid captcha entered from %s", ip) - - @defer.inlineCallbacks - def register_email(self, threepidCreds): - """ - Registers emails with an identity server. - - Used only by c/s api v1 - """ - - for c in threepidCreds: - logger.info( - "validating threepidcred sid %s on id server %s", - c["sid"], - c["idServer"], - ) - try: - threepid = yield self.identity_handler.threepid_from_creds(c) - except Exception: - logger.exception("Couldn't validate 3pid") - raise RegistrationError(400, "Couldn't validate 3pid") - - if not threepid: - raise RegistrationError(400, "Couldn't validate 3pid") - logger.info( - "got threepid with medium '%s' and address '%s'", - threepid["medium"], - threepid["address"], - ) - - if not check_3pid_allowed(self.hs, threepid["medium"], threepid["address"]): - raise RegistrationError(403, "Third party identifier is not allowed") - - @defer.inlineCallbacks - def bind_emails(self, user_id, threepidCreds): - """Links emails with a user ID and informs an identity server. - - Used only by c/s api v1 - """ - - # Now we have a matrix ID, bind it to the threepids we were given - for c in threepidCreds: - # XXX: This should be a deferred list, shouldn't it? - yield self.identity_handler.bind_threepid(c, user_id) - def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None): # don't allow people to register the server notices mxid if self._server_notices_mxid is not None: @@ -463,42 +395,6 @@ class RegistrationHandler(BaseHandler): self._next_generated_user_id += 1 return str(id) - @defer.inlineCallbacks - def _validate_captcha(self, ip_addr, private_key, challenge, response): - """Validates the captcha provided. - - Used only by c/s api v1 - - Returns: - dict: Containing 'valid'(bool) and 'error_url'(str) if invalid. - - """ - response = yield self._submit_captcha(ip_addr, private_key, challenge, response) - # parse Google's response. Lovely format.. - lines = response.split("\n") - json = { - "valid": lines[0] == "true", - "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" 
- + "error=%s" % lines[1], - } - return json - - @defer.inlineCallbacks - def _submit_captcha(self, ip_addr, private_key, challenge, response): - """ - Used only by c/s api v1 - """ - data = yield self.captcha_client.post_urlencoded_get_raw( - "http://www.recaptcha.net:80/recaptcha/api/verify", - args={ - "privatekey": private_key, - "remoteip": ip_addr, - "challenge": challenge, - "response": response, - }, - ) - return data - @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): room_id = None diff --git a/synapse/http/client.py b/synapse/http/client.py index 0ac20ebefc..0ae6db8ea7 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -35,7 +35,7 @@ from twisted.internet.interfaces import ( ) from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody +from twisted.web.client import Agent, HTTPConnectionPool, readBody from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers @@ -599,38 +599,6 @@ def _readBodyToFile(response, stream, max_size): return d -class CaptchaServerHttpClient(SimpleHttpClient): - """ - Separate HTTP client for talking to google's captcha servers - Only slightly special because accepts partial download responses - - used only by c/s api v1 - """ - - @defer.inlineCallbacks - def post_urlencoded_get_raw(self, url, args={}): - query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True) - - response = yield self.request( - "POST", - url, - data=query_bytes, - headers=Headers( - { - b"Content-Type": [b"application/x-www-form-urlencoded"], - b"User-Agent": [self.user_agent], - } - ), - ) - - try: - body = yield make_deferred_yieldable(readBody(response)) - return body - except PartialDownloadError as e: - # twisted dislikes google's response, no content length. - return e.response - - def encode_urlencode_args(args): return {k: encode_urlencode_arg(v) for k, v in args.items()} -- cgit 1.4.1 From a90d16dabc6f498136a098568b1d37858d4af5b6 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Tue, 3 Sep 2019 10:21:30 +0100 Subject: Opentrace device lists (#5853) Trace device list changes. --- changelog.d/5853.feature | 1 + synapse/handlers/device.py | 65 +++++++++++++++++++++++++- synapse/handlers/devicemessage.py | 6 ++- synapse/logging/opentracing.py | 70 +++++++--------------------- synapse/rest/client/v2_alpha/keys.py | 4 +- synapse/rest/client/v2_alpha/sendtodevice.py | 4 ++ synapse/storage/deviceinbox.py | 21 +++++++++ synapse/storage/devices.py | 5 ++ 8 files changed, 118 insertions(+), 58 deletions(-) create mode 100644 changelog.d/5853.feature diff --git a/changelog.d/5853.feature b/changelog.d/5853.feature new file mode 100644 index 0000000000..80a04ae2ee --- /dev/null +++ b/changelog.d/5853.feature @@ -0,0 +1 @@ +Opentracing for device list updates. 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 5c1cf83c9d..71a8f33da3 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -25,6 +25,7 @@ from synapse.api.errors import ( HttpResponseException, RequestSendFailed, ) +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util import stringutils from synapse.util.async_helpers import Linearizer @@ -45,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler): self.state = hs.get_state_handler() self._auth_handler = hs.get_auth_handler() + @trace @defer.inlineCallbacks def get_devices_by_user(self, user_id): """ @@ -56,6 +58,7 @@ class DeviceWorkerHandler(BaseHandler): defer.Deferred: list[dict[str, X]]: info on each device """ + set_tag("user_id", user_id) device_map = yield self.store.get_devices_by_user(user_id) ips = yield self.store.get_last_client_ip_by_device(user_id, device_id=None) @@ -64,8 +67,10 @@ class DeviceWorkerHandler(BaseHandler): for device in devices: _update_device_from_client_ips(device, ips) + log_kv(device_map) return devices + @trace @defer.inlineCallbacks def get_device(self, user_id, device_id): """ Retrieve the given device @@ -85,9 +90,14 @@ class DeviceWorkerHandler(BaseHandler): raise errors.NotFoundError ips = yield self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) + + set_tag("device", device) + set_tag("ips", ips) + return device @measure_func("device.get_user_ids_changed") + @trace @defer.inlineCallbacks def get_user_ids_changed(self, user_id, from_token): """Get list of users that have had the devices updated, or have newly @@ -97,6 +107,9 @@ class DeviceWorkerHandler(BaseHandler): user_id (str) from_token (StreamToken) """ + + set_tag("user_id", user_id) + set_tag("from_token", from_token) now_room_key = yield self.store.get_room_events_max_id() room_ids = yield self.store.get_rooms_for_user(user_id) @@ -148,6 +161,9 @@ class DeviceWorkerHandler(BaseHandler): # special-case for an empty prev state: include all members # in the changed list if not event_ids: + log_kv( + {"event": "encountered empty previous state", "room_id": room_id} + ) for key, event_id in iteritems(current_state_ids): etype, state_key = key if etype != EventTypes.Member: @@ -200,7 +216,11 @@ class DeviceWorkerHandler(BaseHandler): possibly_joined = [] possibly_left = [] - return {"changed": list(possibly_joined), "left": list(possibly_left)} + result = {"changed": list(possibly_joined), "left": list(possibly_left)} + + log_kv(result) + + return result class DeviceHandler(DeviceWorkerHandler): @@ -267,6 +287,7 @@ class DeviceHandler(DeviceWorkerHandler): raise errors.StoreError(500, "Couldn't generate a device ID.") + @trace @defer.inlineCallbacks def delete_device(self, user_id, device_id): """ Delete the given device @@ -284,6 +305,10 @@ class DeviceHandler(DeviceWorkerHandler): except errors.StoreError as e: if e.code == 404: # no match + set_tag("error", True) + log_kv( + {"reason": "User doesn't have device id.", "device_id": device_id} + ) pass else: raise @@ -296,6 +321,7 @@ class DeviceHandler(DeviceWorkerHandler): yield self.notify_device_update(user_id, [device_id]) + @trace @defer.inlineCallbacks def delete_all_devices_for_user(self, user_id, except_device_id=None): """Delete all of the user's devices @@ -331,6 +357,8 @@ class DeviceHandler(DeviceWorkerHandler): except errors.StoreError as e: if e.code == 404: # no match + set_tag("error", True) + 
set_tag("reason", "User doesn't have that device id.") pass else: raise @@ -371,6 +399,7 @@ class DeviceHandler(DeviceWorkerHandler): else: raise + @trace @measure_func("notify_device_update") @defer.inlineCallbacks def notify_device_update(self, user_id, device_ids): @@ -386,6 +415,8 @@ class DeviceHandler(DeviceWorkerHandler): hosts.update(get_domain_from_id(u) for u in users_who_share_room) hosts.discard(self.server_name) + set_tag("target_hosts", hosts) + position = yield self.store.add_device_change_to_streams( user_id, device_ids, list(hosts) ) @@ -405,6 +436,7 @@ class DeviceHandler(DeviceWorkerHandler): ) for host in hosts: self.federation_sender.send_device_messages(host) + log_kv({"message": "sent device update to host", "host": host}) @defer.inlineCallbacks def on_federation_query_user_devices(self, user_id): @@ -451,12 +483,15 @@ class DeviceListUpdater(object): iterable=True, ) + @trace @defer.inlineCallbacks def incoming_device_list_update(self, origin, edu_content): """Called on incoming device list update from federation. Responsible for parsing the EDU and adding to pending updates list. """ + set_tag("origin", origin) + set_tag("edu_content", edu_content) user_id = edu_content.pop("user_id") device_id = edu_content.pop("device_id") stream_id = str(edu_content.pop("stream_id")) # They may come as ints @@ -471,12 +506,30 @@ class DeviceListUpdater(object): device_id, origin, ) + + set_tag("error", True) + log_kv( + { + "message": "Got a device list update edu from a user and " + "device which does not match the origin of the request.", + "user_id": user_id, + "device_id": device_id, + } + ) return room_ids = yield self.store.get_rooms_for_user(user_id) if not room_ids: # We don't share any rooms with this user. Ignore update, as we # probably won't get any further updates. + set_tag("error", True) + log_kv( + { + "message": "Got an update from a user for which " + "we don't share any rooms", + "other user_id": user_id, + } + ) logger.warning( "Got device list update edu for %r/%r, but don't share a room", user_id, @@ -578,6 +631,7 @@ class DeviceListUpdater(object): request: https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid """ + log_kv({"message": "Doing resync to update device list."}) # Fetch all devices for the user. origin = get_domain_from_id(user_id) try: @@ -594,13 +648,20 @@ class DeviceListUpdater(object): # eventually become consistent. 
return except FederationDeniedError as e: + set_tag("error", True) + log_kv({"reason": "FederationDeniedError"}) logger.info(e) return - except Exception: + except Exception as e: # TODO: Remember that we are now out of sync and try again # later + set_tag("error", True) + log_kv( + {"message": "Exception raised by federation request", "exception": e} + ) logger.exception("Failed to handle device list update for %s", user_id) return + log_kv({"result": result}) stream_id = result["stream_id"] devices = result["devices"] diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index c7d56779b8..01731cb2d0 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -22,6 +22,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.logging.opentracing import ( get_active_span_text_map, + log_kv, set_tag, start_active_span, whitelisted_homeserver, ) @@ -86,7 +87,8 @@ class DeviceMessageHandler(object): @defer.inlineCallbacks def send_device_message(self, sender_user_id, message_type, messages): - + set_tag("number_of_messages", len(messages)) + set_tag("sender", sender_user_id) local_messages = {} remote_messages = {} for user_id, by_device in messages.items(): @@ -124,6 +126,7 @@ class DeviceMessageHandler(object): else None, } + log_kv({"local_messages": local_messages}) stream_id = yield self.store.add_messages_to_device_inbox( local_messages, remote_edu_contents ) @@ -132,6 +135,7 @@ class DeviceMessageHandler(object): "to_device_key", stream_id, users=local_messages.keys() ) + log_kv({"remote_messages": remote_messages}) for destination in remote_messages.keys(): # Enqueue a new federation transaction to send the new # device messages to each remote destination. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index dd296027a1..256b972aaa 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -85,14 +85,14 @@ the function becomes the operation name for the span. return something_usual_and_useful -Operation names can be explicitly set for functions by using -``trace_using_operation_name`` +Operation names can be explicitly set for a function by passing the +operation name to ``trace`` .. code-block:: python - from synapse.logging.opentracing import trace_using_operation_name + from synapse.logging.opentracing import trace - @trace_using_operation_name("A *much* better operation name") + @trace(opname="a_better_operation_name") def interesting_badly_named_function(*args, **kwargs): # Does all kinds of cool and expected things return something_usual_and_useful @@ -641,66 +641,26 @@ def extract_text_map(carrier): # Tracing decorators -def trace(func): +def trace(func=None, opname=None): """ Decorator to trace a function. - Sets the operation name to that of the function's. + Sets the operation name to the function's name, or to the given + opname if one is passed. See the module's docstring for usage + examples.
""" - if opentracing is None: - return func - @wraps(func) - def _trace_inner(self, *args, **kwargs): - if opentracing is None: - return func(self, *args, **kwargs) - - scope = start_active_span(func.__name__) - scope.__enter__() - - try: - result = func(self, *args, **kwargs) - if isinstance(result, defer.Deferred): - - def call_back(result): - scope.__exit__(None, None, None) - return result - - def err_back(result): - scope.span.set_tag(tags.ERROR, True) - scope.__exit__(None, None, None) - return result - - result.addCallbacks(call_back, err_back) - - else: - scope.__exit__(None, None, None) - - return result - - except Exception as e: - scope.__exit__(type(e), None, e.__traceback__) - raise - - return _trace_inner - - -def trace_using_operation_name(operation_name): - """Decorator to trace a function. Explicitely sets the operation_name.""" - - def trace(func): - """ - Decorator to trace a function. - Sets the operation name to that of the function's. - """ + def decorator(func): if opentracing is None: return func + _opname = opname if opname else func.__name__ + @wraps(func) def _trace_inner(self, *args, **kwargs): if opentracing is None: return func(self, *args, **kwargs) - scope = start_active_span(operation_name) + scope = start_active_span(_opname) scope.__enter__() try: @@ -717,6 +677,7 @@ def trace_using_operation_name(operation_name): return result result.addCallbacks(call_back, err_back) + else: scope.__exit__(None, None, None) @@ -728,7 +689,10 @@ def trace_using_operation_name(operation_name): return _trace_inner - return trace + if func: + return decorator(func) + else: + return decorator def tag_args(func): diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 64b6898eb8..2e680134a0 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -24,7 +24,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) -from synapse.logging.opentracing import log_kv, set_tag, trace_using_operation_name +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import StreamToken from ._base import client_patterns @@ -69,7 +69,7 @@ class KeyUploadServlet(RestServlet): self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() - @trace_using_operation_name("upload_keys") + @trace(opname="upload_keys") @defer.inlineCallbacks def on_POST(self, request, device_id): requester = yield self.auth.get_user_by_req(request, allow_guest=True) diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 2613648d82..d90e52ed1a 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -19,6 +19,7 @@ from twisted.internet import defer from synapse.http import servlet from synapse.http.servlet import parse_json_object_from_request +from synapse.logging.opentracing import set_tag, trace from synapse.rest.client.transactions import HttpTransactionCache from ._base import client_patterns @@ -42,7 +43,10 @@ class SendToDeviceRestServlet(servlet.RestServlet): self.txns = HttpTransactionCache(hs) self.device_message_handler = hs.get_device_message_handler() + @trace(opname="sendToDevice") def on_PUT(self, request, message_type, txn_id): + set_tag("message_type", message_type) + set_tag("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self._put, request, message_type, txn_id ) diff --git a/synapse/storage/deviceinbox.py 
b/synapse/storage/deviceinbox.py index 4dca9de617..6b7458304e 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -19,6 +19,7 @@ from canonicaljson import json from twisted.internet import defer +from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.storage._base import SQLBaseStore from synapse.storage.background_updates import BackgroundUpdateStore from synapse.util.caches.expiringcache import ExpiringCache @@ -72,6 +73,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): "get_new_messages_for_device", get_new_messages_for_device_txn ) + @trace @defer.inlineCallbacks def delete_messages_for_device(self, user_id, device_id, up_to_stream_id): """ @@ -87,11 +89,15 @@ class DeviceInboxWorkerStore(SQLBaseStore): last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), None ) + + set_tag("last_deleted_stream_id", last_deleted_stream_id) + if last_deleted_stream_id: has_changed = self._device_inbox_stream_cache.has_entity_changed( user_id, last_deleted_stream_id ) if not has_changed: + log_kv({"message": "No changes in cache since last check"}) return 0 def delete_messages_for_device_txn(txn): @@ -107,6 +113,10 @@ class DeviceInboxWorkerStore(SQLBaseStore): "delete_messages_for_device", delete_messages_for_device_txn ) + log_kv( + {"message": "deleted {} messages for device".format(count), "count": count} + ) + # Update the cache, ensuring that we only ever increase the value last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), 0 @@ -117,6 +127,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): return count + @trace def get_new_device_msgs_for_remote( self, destination, last_stream_id, current_stream_id, limit ): @@ -132,16 +143,23 @@ class DeviceInboxWorkerStore(SQLBaseStore): in the stream the messages got to. """ + set_tag("destination", destination) + set_tag("last_stream_id", last_stream_id) + set_tag("current_stream_id", current_stream_id) + set_tag("limit", limit) + has_changed = self._device_federation_outbox_stream_cache.has_entity_changed( destination, last_stream_id ) if not has_changed or last_stream_id == current_stream_id: + log_kv({"message": "No new messages in stream"}) return defer.succeed(([], current_stream_id)) if limit <= 0: # This can happen if we run out of room for EDUs in the transaction. return defer.succeed(([], last_stream_id)) + @trace def get_new_messages_for_remote_destination_txn(txn): sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" @@ -156,6 +174,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): stream_pos = row[0] messages.append(json.loads(row[1])) if len(messages) < limit: + log_kv({"message": "Set stream position to current position"}) stream_pos = current_stream_id return messages, stream_pos @@ -164,6 +183,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): get_new_messages_for_remote_destination_txn, ) + @trace def delete_device_msgs_for_remote(self, destination, up_to_stream_id): """Used to delete messages when the remote destination acknowledges their receipt. 
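The `trace(func=None, opname=None)` rewrite in this commit follows the standard Python pattern for a decorator that works both bare (`@trace`) and with keyword arguments (`@trace(opname="...")`): called without a function it returns the real decorator, otherwise it applies that decorator immediately. Below is a minimal, self-contained sketch of the pattern, with prints standing in for the real span handling; it is illustrative only, not the Synapse implementation.

    from functools import wraps


    def trace(func=None, opname=None):
        """Usable both as @trace and as @trace(opname="...")."""

        def decorator(func):
            # Default to the decorated function's own name, as the patch does.
            _opname = opname if opname else func.__name__

            @wraps(func)
            def _trace_inner(*args, **kwargs):
                # Stand-in for opening and closing a tracing scope around
                # the wrapped call.
                print("start span: %s" % _opname)
                try:
                    return func(*args, **kwargs)
                finally:
                    print("finish span: %s" % _opname)

            return _trace_inner

        if func:
            # Bare usage: @trace
            return decorator(func)
        else:
            # Keyword usage: @trace(opname="...")
            return decorator


    @trace
    def get_devices():
        return {}


    @trace(opname="a_better_operation_name")
    def interesting_badly_named_function():
        return {}

Two differences from this sketch are worth noting: the real `_trace_inner` takes `self` (it is written for decorating methods), and it cannot close the scope in a `finally`, because a returned Deferred outlives the call; instead it exits the scope from the Deferred's callback/errback, as the diff above shows.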
@@ -214,6 +234,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore): expiry_ms=30 * 60 * 1000, ) + @trace @defer.inlineCallbacks def add_messages_to_device_inbox( self, local_messages_by_user_then_device, remote_messages_by_destination diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 76542c512d..41f62828bd 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -23,6 +23,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError from synapse.logging.opentracing import ( get_active_span_text_map, + set_tag, trace, whitelisted_homeserver, ) @@ -321,6 +322,7 @@ class DeviceWorkerStore(SQLBaseStore): def get_device_stream_token(self): return self._device_list_id_gen.get_current_token() + @trace @defer.inlineCallbacks def get_user_devices_from_cache(self, query_list): """Get the devices (and keys if any) for remote users from the cache. @@ -352,6 +354,9 @@ class DeviceWorkerStore(SQLBaseStore): else: results[user_id] = yield self._get_cached_devices_for_user(user_id) + set_tag("in_cache", results) + set_tag("not_in_cache", user_ids_not_in_cache) + return user_ids_not_in_cache, results @cachedInlineCallbacks(num_args=2, tree=True) -- cgit 1.4.1 From 2a447826665a5ac8e12736214f0ef2401e72f1f9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 3 Sep 2019 11:42:45 +0100 Subject: Remove double return statements (#5962) Remove all the "double return" statements which were a result of us removing all the instances of ``` defer.returnValue(...) return ``` statements when we switched to python3 fully. --- changelog.d/5962.misc | 1 + synapse/api/auth.py | 1 - synapse/appservice/api.py | 3 --- synapse/handlers/appservice.py | 2 -- synapse/handlers/events.py | 1 - synapse/handlers/initial_sync.py | 2 -- synapse/handlers/room.py | 1 - synapse/handlers/sync.py | 1 - synapse/rest/client/v1/room.py | 1 - synapse/rest/client/v2_alpha/register.py | 2 -- synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/state/__init__.py | 1 - synapse/storage/appservice.py | 1 - synapse/storage/directory.py | 2 -- synapse/storage/profile.py | 1 - 15 files changed, 1 insertion(+), 20 deletions(-) create mode 100644 changelog.d/5962.misc diff --git a/changelog.d/5962.misc b/changelog.d/5962.misc new file mode 100644 index 0000000000..d97d376c36 --- /dev/null +++ b/changelog.d/5962.misc @@ -0,0 +1 @@ +Remove unnecessary return statements in the codebase which were the result of a regex run. 
\ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index fd3cdf50b0..ddc195bc32 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -704,7 +704,6 @@ class Auth(object): and visibility.content["history_visibility"] == "world_readable" ): return Membership.JOIN, None - return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN ) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 007ca75a94..3e25bf5747 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -107,7 +107,6 @@ class ApplicationServiceApi(SimpleHttpClient): except CodeMessageException as e: if e.code == 404: return False - return logger.warning("query_user to %s received %s", uri, e.code) except Exception as ex: logger.warning("query_user to %s threw exception %s", uri, ex) @@ -127,7 +126,6 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning("query_alias to %s received %s", uri, e.code) if e.code == 404: return False - return except Exception as ex: logger.warning("query_alias to %s threw exception %s", uri, ex) return False @@ -230,7 +228,6 @@ class ApplicationServiceApi(SimpleHttpClient): sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) return True - return except CodeMessageException as e: logger.warning("push_bulk to %s received %s", uri, e.code) except Exception as ex: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index d1a51df6f9..3e9b298154 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -294,12 +294,10 @@ class ApplicationServicesHandler(object): # we don't know if they are unknown or not since it isn't one of our # users. We can't poke ASes. return False - return user_info = yield self.store.get_user_by_id(user_id) if user_info: return False - return # user not found; could be the AS though, so check. services = self.store.get_app_services() diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 2f1f10a9af..5e748687e3 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -167,7 +167,6 @@ class EventHandler(BaseHandler): if not event: return None - return users = yield self.store.get_users_in_room(event.room_id) is_peeking = user.to_string() not in users diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 595f75400b..f991efeee3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -450,7 +450,6 @@ class InitialSyncHandler(BaseHandler): # else it will throw. 
member_event = yield self.auth.check_user_was_in_room(room_id, user_id) return member_event.membership, member_event.event_id - return except AuthError: visibility = yield self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -460,7 +459,6 @@ class InitialSyncHandler(BaseHandler): and visibility.content["history_visibility"] == "world_readable" ): return Membership.JOIN, None - return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 6e47fe7867..a509e11d69 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -852,7 +852,6 @@ class RoomContextHandler(object): ) if not event: return None - return filtered = yield (filter_evts([event])) if not filtered: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index d582f8e494..19bca6717f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -578,7 +578,6 @@ class SyncHandler(object): if not last_events: return None - return last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index f244e8f469..3582259026 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -703,7 +703,6 @@ class RoomMembershipRestServlet(TransactionRestServlet): txn_id, ) return 200, {} - return target = requester.user if membership_action in ["invite", "ban", "unban", "kick"]: diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 65f9fce2ff..107854c669 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -230,7 +230,6 @@ class RegisterRestServlet(RestServlet): if kind == b"guest": ret = yield self._do_guest_registration(body, address=client_addr) return ret - return elif kind != b"user": raise UnrecognizedRequestError( "Do not understand membership kind: %s" % (kind,) @@ -280,7 +279,6 @@ class RegisterRestServlet(RestServlet): desired_username, access_token, body ) return 200, result # we throw for non 200 responses - return # for regular registration, downcase the provided username before # attempting to register it. 
This should mean diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index bd40891a7f..7a56cd4b6c 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -183,7 +183,6 @@ class PreviewUrlResource(DirectServeResource): if isinstance(og, six.text_type): og = og.encode("utf8") return og - return media_info = yield self._download_url(url, user) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index a0d34f16ea..2b0f4c79ee 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -136,7 +136,6 @@ class StateHandler(object): if event_id: event = yield self.store.get_event(event_id, allow_none=True) return event - return state_map = yield self.store.get_events( list(state.values()), get_prev_content=False diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 36657753cd..435b2acd4d 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -165,7 +165,6 @@ class ApplicationServiceTransactionWorkerStore( ) if result: return result.get("state") - return return None def set_appservice_state(self, service, state): diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index e966a73f3d..eed7757ed5 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -47,7 +47,6 @@ class DirectoryWorkerStore(SQLBaseStore): if not room_id: return None - return servers = yield self._simple_select_onecol( "room_alias_servers", @@ -58,7 +57,6 @@ class DirectoryWorkerStore(SQLBaseStore): if not servers: return None - return return RoomAliasMapping(room_id, room_alias.to_string(), servers) diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 8a5d8e9b18..912c1df6be 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -35,7 +35,6 @@ class ProfileWorkerStore(SQLBaseStore): if e.code == 404: # no match return ProfileInfo(None, None) - return else: raise -- cgit 1.4.1 From 8401bcd206ed483a03d7bd7ea97cc401b86ed4b4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 3 Sep 2019 12:44:14 +0100 Subject: fix typo --- synapse/handlers/presence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8377a0ddc2..053cf66b28 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -255,7 +255,7 @@ class PresenceHandler(object): self.unpersisted_users_changes = set() if unpersisted: - logger.info("Persisting %d upersisted presence updates", len(unpersisted)) + logger.info("Persisting %d unpersisted presence updates", len(unpersisted)) yield self.store.update_presence( [self.user_to_current_state[user_id] for user_id in unpersisted] ) -- cgit 1.4.1 From 0eac7077c9e6049a86c6fb05ab127ffc84cf7315 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 3 Sep 2019 09:01:30 -0600 Subject: Ensure an auth instance is available to ListMediaInRoom (#5967) * Ensure an auth instance is available to ListMediaInRoom Fixes https://github.com/matrix-org/synapse/issues/5737 * Changelog --- changelog.d/5967.bugfix | 1 + synapse/rest/admin/media.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5967.bugfix diff --git a/changelog.d/5967.bugfix b/changelog.d/5967.bugfix new file mode 100644 index 0000000000..8d7bf5c2e9 --- /dev/null +++ b/changelog.d/5967.bugfix @@ -0,0 +1 @@ +Fix list media admin API always returning an error. 
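The media admin fix in the diff below is a plain missing-attribute bug: the request handler needs `self.auth`, but `__init__` never assigned it, so the endpoint failed on every request at runtime rather than at construction time. A stripped-down illustration of the failure mode follows; the `FakeHs`/`FakeAuth` scaffolding is hypothetical stand-in code, not the real Synapse objects.

    class FakeAuth(object):
        def get_user_by_req(self, request):
            return "@admin:example.com"


    class FakeHs(object):
        # Minimal stand-in for the hs object handed to every servlet.
        def get_datastore(self):
            return object()

        def get_auth(self):
            return FakeAuth()


    class ListMediaSketch(object):
        def __init__(self, hs):
            self.store = hs.get_datastore()
            # The one-line fix: without this assignment, every on_GET call
            # dies with AttributeError, so the API "always returns an error".
            self.auth = hs.get_auth()

        def on_GET(self, request, room_id):
            requester = self.auth.get_user_by_req(request)
            return 200, {"requester": requester}


    servlet = ListMediaSketch(FakeHs())
    print(servlet.on_GET(request=None, room_id="!room:example.com"))

Because the attribute is only dereferenced per request, the missing assignment passes import and construction silently, which is how the bug survived until it was reported.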
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index f3f63f0be7..ed7086d09c 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -60,6 +60,7 @@ class ListMediaInRoom(RestServlet): def __init__(self, hs): self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): -- cgit 1.4.1 From 894c1a575979a52114fac09ab3814f4fe023659a Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 3 Sep 2019 16:36:01 +0100 Subject: Docker packaging should not su-exec or chmod if already running as UID/GID (#5970) Adjust su-exec to only be used if needed. If UID == getuid() and GID == getgid() then we do not need to su-exec, and chmod will not work. --- changelog.d/5970.docker | 1 + docker/start.py | 84 ++++++++++++++++++++++++++++--------------------- 2 files changed, 50 insertions(+), 35 deletions(-) create mode 100644 changelog.d/5970.docker diff --git a/changelog.d/5970.docker b/changelog.d/5970.docker new file mode 100644 index 0000000000..c9d04da9cd --- /dev/null +++ b/changelog.d/5970.docker @@ -0,0 +1 @@ +Avoid changing UID/GID if they are already correct. diff --git a/docker/start.py b/docker/start.py index 40a861f200..260f2d9943 100755 --- a/docker/start.py +++ b/docker/start.py @@ -41,8 +41,8 @@ def generate_config_from_template(config_dir, config_path, environ, ownership): config_dir (str): where to put generated config files config_path (str): where to put the main config file environ (dict): environment dictionary - ownership (str): "<user>:<group>" string which will be used to set - ownership of the generated configs + ownership (str|None): "<user>:<group>" string which will be used to set + ownership of the generated configs. If None, ownership will not change. """ for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"): if v not in environ: @@ -105,24 +105,24 @@ def generate_config_from_template(config_dir, config_path, environ, ownership): log("Generating log config file " + log_config_file) convert("/conf/log.config", log_config_file, environ) - subprocess.check_output(["chown", "-R", ownership, "/data"]) - # Hopefully we already have a signing key, but generate one if not. - subprocess.check_output( - [ - "su-exec", - ownership, - "python", - "-m", - "synapse.app.homeserver", - "--config-path", - config_path, - # tell synapse to put generated keys in /data rather than /compiled - "--keys-directory", - config_dir, - "--generate-keys", - ] - ) + args = [ + "python", + "-m", + "synapse.app.homeserver", + "--config-path", + config_path, + # tell synapse to put generated keys in /data rather than /compiled + "--keys-directory", + config_dir, + "--generate-keys", + ] + + if ownership is not None: + subprocess.check_output(["chown", "-R", ownership, "/data"]) + args = ["su-exec", ownership] + args + + subprocess.check_output(args) def run_generate_config(environ, ownership): @@ -130,7 +130,7 @@ def run_generate_config(environ, ownership): Args: environ (dict): env var dict - ownership (str): "userid:groupid" arg for chmod + ownership (str|None): "userid:groupid" arg for chown. If None, ownership will not change. Never returns. """ @@ -149,9 +149,6 @@ def run_generate_config(environ, ownership): log("Creating log config %s" % (log_config_file,)) convert("/conf/log.config", log_config_file, environ) - # make sure that synapse has perms to write to the data dir.
- subprocess.check_output(["chown", ownership, data_dir]) - args = [ "python", "-m", @@ -170,12 +167,33 @@ def run_generate_config(environ, ownership): "--open-private-ports", ] # log("running %s" % (args, )) - os.execv("/usr/local/bin/python", args) + + if ownership is not None: + args = ["su-exec", ownership] + args + os.execv("/sbin/su-exec", args) + + # make sure that synapse has perms to write to the data dir. + subprocess.check_output(["chown", ownership, data_dir]) + else: + os.execv("/usr/local/bin/python", args) def main(args, environ): mode = args[1] if len(args) > 1 else None - ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991)) + desired_uid = int(environ.get("UID", "991")) + desired_gid = int(environ.get("GID", "991")) + if (desired_uid == os.getuid()) and (desired_gid == os.getgid()): + ownership = None + else: + ownership = "{}:{}".format(desired_uid, desired_gid) + + log( + "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s" + % (os.getuid(), os.getgid(), desired_uid, desired_gid) + ) + + if ownership is None: + log("Will not perform chmod/su-exec as UserID already matches request") # In generate mode, generate a configuration and missing keys, then exit if mode == "generate": @@ -227,16 +245,12 @@ def main(args, environ): log("Starting synapse with config file " + config_path) - args = [ - "su-exec", - ownership, - "python", - "-m", - "synapse.app.homeserver", - "--config-path", - config_path, - ] - os.execv("/sbin/su-exec", args) + args = ["python", "-m", "synapse.app.homeserver", "--config-path", config_path] + if ownership is not None: + args = ["su-exec", ownership] + args + os.execv("/sbin/su-exec", args) + else: + os.execv("/usr/local/bin/python", args) if __name__ == "__main__": -- cgit 1.4.1 From a98b8583c647d841b49b94a3de030b9e8d5271c0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 3 Sep 2019 19:58:51 +0100 Subject: Remove unnecessary variable declaration --- synapse/handlers/register.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 3142d85788..1711d5ac5c 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -397,7 +397,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): room_id = room_identifier -- cgit 1.4.1 From 6b6086b8bf8d222e528002009d921804d306d85d Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 3 Sep 2019 20:00:09 +0100 Subject: Fix docstring --- synapse/handlers/register.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1711d5ac5c..e59b2a3684 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -681,8 +681,7 @@ class RegistrationHandler(BaseHandler): Args: user_id (str): id of user threepid (object): m.login.msisdn auth response - token (str): access_token for the user - bind_email (bool): true if the client requested the email to be + bind_msisdn (bool): true if the client requested the msisdn to be bound at the identity server Returns: defer.Deferred: -- cgit 1.4.1
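A final note on the docker/start.py changes in commit 894c1a57 above: the mechanism reduces to computing the desired UID/GID once, comparing them against the IDs the container is already running under, and only wrapping commands in `su-exec` (and running `chown`) when they differ. The following standalone sketch isolates that decision logic with the patch's defaults of 991; it is a simplification under those assumptions, not the full script, which also handles config generation and several run modes.

    import os


    def compute_ownership(environ):
        """Return "uid:gid" when a user switch is needed, or None when the
        process already runs as the requested IDs."""
        desired_uid = int(environ.get("UID", "991"))
        desired_gid = int(environ.get("GID", "991"))
        if desired_uid == os.getuid() and desired_gid == os.getgid():
            return None
        return "{}:{}".format(desired_uid, desired_gid)


    def wrap_command(args, ownership):
        # su-exec is only needed (and only possible) when running as root
        # and switching to another user; otherwise run the command directly.
        if ownership is not None:
            return ["su-exec", ownership] + args
        return args


    if __name__ == "__main__":
        base = ["python", "-m", "synapse.app.homeserver",
                "--config-path", "/data/homeserver.yaml"]
        print(wrap_command(base, compute_ownership(os.environ)))

Skipping the wrapper when the IDs already match is what makes the image usable with `docker run --user`: chown and su-exec would fail for a non-root user, but in that case they are also unnecessary.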