From 4806651744616bf48abf408034ab9560e33f60ce Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 23 Jul 2019 23:00:55 +1000 Subject: Replace returnValue with return (#5736) --- tests/crypto/test_keyring.py | 6 +++--- tests/handlers/test_register.py | 2 +- tests/http/federation/test_matrix_federation_agent.py | 4 ++-- tests/http/federation/test_srv_resolver.py | 2 +- tests/http/test_fedclient.py | 2 +- tests/rest/client/test_transactions.py | 2 +- tests/storage/test_background_update.py | 4 ++-- tests/storage/test_redaction.py | 4 ++-- tests/storage/test_roommember.py | 2 +- tests/storage/test_state.py | 2 +- tests/test_visibility.py | 6 +++--- tests/util/caches/test_descriptors.py | 8 ++++---- tests/utils.py | 6 +++--- 13 files changed, 25 insertions(+), 25 deletions(-) (limited to 'tests') diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 8d94a503d6..c4f0bbd3dd 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -107,7 +107,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertEquals(LoggingContext.current_context().request, "11") with PreserveLoggingContext(): yield persp_deferred - defer.returnValue(persp_resp) + return persp_resp self.http_client.post_json.side_effect = get_perspectives @@ -554,7 +554,7 @@ def run_in_context(f, *args, **kwargs): # logs. ctx.request = "testctx" rv = yield f(*args, **kwargs) - defer.returnValue(rv) + return rv def _verify_json_for_server(kr, *args): @@ -565,6 +565,6 @@ def _verify_json_for_server(kr, *args): @defer.inlineCallbacks def v(): rv1 = yield kr.verify_json_for_server(*args) - defer.returnValue(rv1) + return rv1 return run_in_context(v) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 90d0129374..99dce45cfe 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -283,4 +283,4 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user, requester, displayname, by_admin=True ) - defer.returnValue((user_id, token)) + return (user_id, token) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index a49f9b3224..b906686b49 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -145,7 +145,7 @@ class MatrixFederationAgentTests(TestCase): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res except Exception as e: logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e) raise @@ -936,7 +936,7 @@ class MatrixFederationAgentTests(TestCase): except Exception as e: logger.warning("Error fetching well-known: %s", e) raise - defer.returnValue(result) + return result def test_well_known_cache(self): self.reactor.lookups["testserv"] = "1.2.3.4" diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 65b51dc981..3b885ef64b 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -61,7 +61,7 @@ class SrvResolverTestCase(unittest.TestCase): # should have restored our context self.assertIs(LoggingContext.current_context(), ctx) - defer.returnValue(result) + return result test_d = do_lookup() self.assertNoResult(test_d) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index b9d6d7ad1c..2b01f40a42 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -68,7 +68,7 @@ class 
FederationClientTests(HomeserverTestCase): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res finally: check_logcontext(context) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index a8adc9a61d..a3d7e3c046 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -46,7 +46,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def cb(): yield Clock(reactor).sleep(0) - defer.returnValue("yay") + return "yay" @defer.inlineCallbacks def test(): diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index fbb9302694..9fabe3fbc0 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -43,7 +43,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): "test_update", progress, ) - defer.returnValue(count) + return count self.update_handler.side_effect = update @@ -60,7 +60,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks def update(progress, count): yield self.store._end_background_update("test_update") - defer.returnValue(count) + return count self.update_handler.side_effect = update self.update_handler.reset_mock() diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 732a778fab..1cb471205b 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -69,7 +69,7 @@ class RedactionTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, room, user, body): @@ -92,7 +92,7 @@ class RedactionTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a..c6e8196b91 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -67,7 +67,7 @@ class RoomMemberStoreTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_one_member(self): diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 212a7ae765..5c2cf3c2db 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -65,7 +65,7 @@ class StateStoreTestCase(tests.unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event def assertStateMapEqual(self, s1, s2): for t in s1: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 118c3bd238..e0605dac2f 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -139,7 +139,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): builder ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_room_member(self, user_id, membership="join", extra_content={}): @@ -161,7 +161,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, user_id, content=None): @@ -182,7 +182,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): ) yield 
self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_large_room(self): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 7807328e2f..56320bbaf9 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -159,7 +159,7 @@ class DescriptorTestCase(unittest.TestCase): def inner_fn(): with PreserveLoggingContext(): yield complete_lookup - defer.returnValue(1) + return 1 return inner_fn() @@ -169,7 +169,7 @@ class DescriptorTestCase(unittest.TestCase): c1.name = "c1" r = yield obj.fn(1) self.assertEqual(LoggingContext.current_context(), c1) - defer.returnValue(r) + return r def check_result(r): self.assertEqual(r, 1) @@ -286,7 +286,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): # we want this to behave like an asynchronous function yield run_on_reactor() assert LoggingContext.current_context().request == "c1" - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) with LoggingContext() as c1: c1.request = "c1" @@ -334,7 +334,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function yield run_on_reactor() - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) obj = Cls() invalidate0 = mock.Mock() diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b47..425e3387db 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -361,7 +361,7 @@ def setup_test_homeserver( if fed: register_federation_servlets(hs, fed) - defer.returnValue(hs) + return hs def register_federation_servlets(hs, resource): @@ -465,9 +465,9 @@ class MockHttpResource(HttpServer): args = [urlparse.unquote(u) for u in matcher.groups()] (code, response) = yield func(mock_request, *args) - defer.returnValue((code, response)) + return (code, response) except CodeMessageException as e: - defer.returnValue((e.code, cs_error(e.msg, code=e.errcode))) + return (e.code, cs_error(e.msg, code=e.errcode)) raise KeyError("No event can handle %s" % path) -- cgit 1.5.1 From 73bbaf2bc6962df2c25443cbc70286318601af5a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 16:55:45 +0100 Subject: Add unit test for current state membership bg update --- tests/storage/test_roommember.py | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a..b04be921f4 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.types import RoomID, UserID +from synapse.types import Requester, RoomID, UserID from tests import unittest from tests.utils import create_room, setup_test_homeserver @@ -84,3 +84,38 @@ class RoomMemberStoreTestCase(unittest.TestCase): ) ], ) + + +class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.room_creator = homeserver.get_room_creation_handler() + + def test_can_rerun_update(self): + # First make sure we have completed all updates. 
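        # (In the loop below, get_success(..., by=0.1) both waits on the
        # deferred and advances the fake reactor by 0.1s so any sleeps inside
        # the update machinery can fire; driving do_next_background_update
        # until has_completed_background_updates reports done is the usual
        # way these tests run background updates to completion.)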
+ while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + # Now let's create a room, which will insert a membership + user = UserID("alice", "test") + requester = Requester(user, None, False, None, None) + self.get_success(self.room_creator.create_room(requester, {})) + + # Register the background update to run again. + self.get_success( + self.store._simple_insert( + table="background_updates", + values={ + "update_name": "current_state_events_membership", + "progress_json": "{}", + "depends_on": None, + }, + ) + ) + + # ... and tell the DataStore that it hasn't finished all updates yet + self.store._all_done = False + + # Now let's actually drive the updates to completion + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) -- cgit 1.5.1 From cf2972c818344214244961e6175f559a6b59123b Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Wed, 24 Jul 2019 13:07:35 +0100 Subject: Fix servlet metric names (#5734) * Fix servlet metric names Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> * Remove redundant check * Cover all return paths --- changelog.d/5734.bugfix | 1 + synapse/federation/transport/server.py | 4 ++- synapse/http/server.py | 41 ++++++++++++++++++++--------- synapse/http/servlet.py | 4 ++- synapse/replication/http/_base.py | 2 +- synapse/rest/admin/server_notice_servlet.py | 9 +++++-- synapse/rest/client/v1/room.py | 37 +++++++++++++++++++++----- synapse/rest/client/v2_alpha/relations.py | 2 ++ tests/test_server.py | 21 +++++++++++---- tests/utils.py | 2 +- 10 files changed, 92 insertions(+), 31 deletions(-) create mode 100644 changelog.d/5734.bugfix (limited to 'tests') diff --git a/changelog.d/5734.bugfix b/changelog.d/5734.bugfix new file mode 100644 index 0000000000..33aea5a94c --- /dev/null +++ b/changelog.d/5734.bugfix @@ -0,0 +1 @@ +Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 663264dec4..ea4e1b6d0f 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -325,7 +325,9 @@ class BaseFederationServlet(object): if code is None: continue - server.register_paths(method, (pattern,), self._wrap(code)) + server.register_paths( + method, (pattern,), self._wrap(code), self.__class__.__name__ + ) class FederationSendServlet(BaseFederationServlet): diff --git a/synapse/http/server.py b/synapse/http/server.py index 72a3d67eb6..e6f351ba3b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -245,7 +245,9 @@ class JsonResource(HttpServer, resource.Resource): isLeaf = True - _PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"]) + _PathEntry = collections.namedtuple( + "_PathEntry", ["pattern", "callback", "servlet_classname"] + ) def __init__(self, hs, canonical_json=True): resource.Resource.__init__(self) @@ -255,12 +257,28 @@ class JsonResource(HttpServer, resource.Resource): self.path_regexs = {} self.hs = hs - def register_paths(self, method, path_patterns, callback): + def register_paths(self, method, path_patterns, callback, servlet_classname): + """ + Registers a request handler against a regular expression. Later request URLs are + checked against these regular expressions in order to identify an appropriate + handler for that request. 
+ + Args: + method (str): GET, POST etc + + path_patterns (Iterable[str]): A list of regular expressions to which + the request URLs are compared. + + callback (function): The handler for the request. Usually a Servlet + + servlet_classname (str): The name of the handler to be used in prometheus + and opentracing logs. + """ method = method.encode("utf-8") # method is bytes on py3 for path_pattern in path_patterns: logger.debug("Registering for %s %s", method, path_pattern.pattern) self.path_regexs.setdefault(method, []).append( - self._PathEntry(path_pattern, callback) + self._PathEntry(path_pattern, callback, servlet_classname) ) def render(self, request): @@ -275,13 +293,9 @@ class JsonResource(HttpServer, resource.Resource): This checks if anyone has registered a callback for that method and path. """ - callback, group_dict = self._get_handler_for_request(request) + callback, servlet_classname, group_dict = self._get_handler_for_request(request) - servlet_instance = getattr(callback, "__self__", None) - if servlet_instance is not None: - servlet_classname = servlet_instance.__class__.__name__ - else: - servlet_classname = "%r" % callback + # Make sure we have a name for this handler in prometheus. request.request_metrics.name = servlet_classname # Now trigger the callback. If it returns a response, we send it @@ -311,7 +325,8 @@ class JsonResource(HttpServer, resource.Resource): request (twisted.web.http.Request): Returns: - Tuple[Callable, dict[unicode, unicode]]: callback method, and the + Tuple[Callable, str, dict[unicode, unicode]]: callback method, the + label to use for that method in prometheus metrics, and the dict mapping keys to path components as specified in the handler's path match regexp. @@ -320,7 +335,7 @@ class JsonResource(HttpServer, resource.Resource): None, or a tuple of (http code, response body). """ if request.method == b"OPTIONS": - return _options_handler, {} + return _options_handler, "options_request_handler", {} # Loop through all the registered callbacks to check if the method # and path regex match @@ -328,10 +343,10 @@ class JsonResource(HttpServer, resource.Resource): m = path_entry.pattern.match(request.path.decode("ascii")) if m: # We found a match! - return path_entry.callback, m.groupdict() + return path_entry.callback, path_entry.servlet_classname, m.groupdict() # Huh. No one wanted to handle that? Fiiiiiine. Send 400. 
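        # (After this change, every register_paths() caller passes the
        # handler's name explicitly, as the updated tests at the end of this
        # commit do:
        #
        #     res.register_paths(
        #         "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
        #     )
        #
        # so the prometheus label is taken from that argument instead of
        # being guessed by introspecting the callback's __self__.)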
- return _unrecognised_request_handler, {} + return _unrecognised_request_handler, "unrecognised_request_handler", {} def _send_response( self, request, code, response_json_object, response_code_message=None diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 889038ff25..f0ca7d9aba 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -290,11 +290,13 @@ class RestServlet(object): for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"): if hasattr(self, "on_%s" % (method,)): + servlet_classname = self.__class__.__name__ method_handler = getattr(self, "on_%s" % (method,)) http_server.register_paths( method, patterns, - trace_servlet(self.__class__.__name__, method_handler), + trace_servlet(servlet_classname, method_handler), + servlet_classname, ) else: diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index fe482e279f..43c89e36dd 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -205,7 +205,7 @@ class ReplicationEndpoint(object): args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args) pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args)) - http_server.register_paths(method, [pattern], handler) + http_server.register_paths(method, [pattern], handler, self.__class__.__name__) def _cached_handler(self, request, txn_id, **kwargs): """Called on new incoming requests when caching is enabled. Checks diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index ee66838a0d..d9c71261f2 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -59,9 +59,14 @@ class SendServerNoticeServlet(RestServlet): def register(self, json_resource): PATTERN = "^/_synapse/admin/v1/send_server_notice" - json_resource.register_paths("POST", (re.compile(PATTERN + "$"),), self.on_POST) json_resource.register_paths( - "PUT", (re.compile(PATTERN + "/(?P[^/]*)$"),), self.on_PUT + "POST", (re.compile(PATTERN + "$"),), self.on_POST, self.__class__.__name__ + ) + json_resource.register_paths( + "PUT", + (re.compile(PATTERN + "/(?P[^/]*)$"),), + self.on_PUT, + self.__class__.__name__, ) @defer.inlineCallbacks diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7709c2d705..6276e97f89 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -67,11 +67,17 @@ class RoomCreateRestServlet(TransactionRestServlet): register_txn_path(self, PATTERNS, http_server) # define CORS for all of /rooms in RoomCreateRestServlet for simplicity http_server.register_paths( - "OPTIONS", client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS + "OPTIONS", + client_patterns("/rooms(?:/.*)?$", v1=True), + self.on_OPTIONS, + self.__class__.__name__, ) # define CORS for /createRoom[/txnid] http_server.register_paths( - "OPTIONS", client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS + "OPTIONS", + client_patterns("/createRoom(?:/.*)?$", v1=True), + self.on_OPTIONS, + self.__class__.__name__, ) def on_PUT(self, request, txn_id): @@ -116,16 +122,28 @@ class RoomStateEventRestServlet(TransactionRestServlet): ) http_server.register_paths( - "GET", client_patterns(state_key, v1=True), self.on_GET + "GET", + client_patterns(state_key, v1=True), + self.on_GET, + self.__class__.__name__, ) http_server.register_paths( - "PUT", client_patterns(state_key, v1=True), self.on_PUT + "PUT", + client_patterns(state_key, v1=True), + self.on_PUT, + 
self.__class__.__name__, ) http_server.register_paths( - "GET", client_patterns(no_state_key, v1=True), self.on_GET_no_state_key + "GET", + client_patterns(no_state_key, v1=True), + self.on_GET_no_state_key, + self.__class__.__name__, ) http_server.register_paths( - "PUT", client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key + "PUT", + client_patterns(no_state_key, v1=True), + self.on_PUT_no_state_key, + self.__class__.__name__, ) def on_GET_no_state_key(self, request, room_id, event_type): @@ -845,18 +863,23 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False): with_get: True to also register respective GET paths for the PUTs. """ http_server.register_paths( - "POST", client_patterns(regex_string + "$", v1=True), servlet.on_POST + "POST", + client_patterns(regex_string + "$", v1=True), + servlet.on_POST, + servlet.__class__.__name__, ) http_server.register_paths( "PUT", client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_PUT, + servlet.__class__.__name__, ) if with_get: http_server.register_paths( "GET", client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_GET, + servlet.__class__.__name__, ) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 6e52f6d284..9e9a639055 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -72,11 +72,13 @@ class RelationSendServlet(RestServlet): "POST", client_patterns(self.PATTERN + "$", releases=()), self.on_PUT_or_POST, + self.__class__.__name__, ) http_server.register_paths( "PUT", client_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), self.on_PUT, + self.__class__.__name__, ) def on_PUT(self, request, *args, **kwargs): diff --git a/tests/test_server.py b/tests/test_server.py index ba08483a4b..2a7d407c98 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -61,7 +61,10 @@ class JsonResourceTests(unittest.TestCase): res = JsonResource(self.homeserver) res.register_paths( - "GET", [re.compile("^/_matrix/foo/(?P[^/]*)$")], _callback + "GET", + [re.compile("^/_matrix/foo/(?P[^/]*)$")], + _callback, + "test_servlet", ) request, channel = make_request( @@ -82,7 +85,9 @@ class JsonResourceTests(unittest.TestCase): raise Exception("boo") res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -105,7 +110,9 @@ class JsonResourceTests(unittest.TestCase): return make_deferred_yieldable(d) res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -122,7 +129,9 @@ class JsonResourceTests(unittest.TestCase): raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN) res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -143,7 +152,9 @@ class JsonResourceTests(unittest.TestCase): self.fail("shouldn't ever get here") res = 
JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar") render(request, res, self.reactor) diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b47..99a3deae21 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -471,7 +471,7 @@ class MockHttpResource(HttpServer): raise KeyError("No event can handle %s" % path) - def register_paths(self, method, path_patterns, callback): + def register_paths(self, method, path_patterns, callback, servlet_name): for path_pattern in path_patterns: self.callbacks.append((method, path_pattern, callback)) -- cgit 1.5.1 From 618bd1ee76a83bd29beb208e9b7097ffcd787099 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 25 Jul 2019 15:59:45 +0100 Subject: Fix some error cases in the caching layer. (#5749) There was some inconsistent behaviour in the caching layer around how exceptions were handled - particularly synchronously-thrown ones. This seems to be most easily handled by pushing the creation of ObservableDeferreds down from CacheDescriptor to the Cache. --- changelog.d/5749.misc | 1 + synapse/util/caches/descriptors.py | 74 +++++++++++++++------------- tests/util/caches/test_descriptors.py | 90 +++++++++++++++++++++++++++++++++-- 3 files changed, 130 insertions(+), 35 deletions(-) create mode 100644 changelog.d/5749.misc (limited to 'tests') diff --git a/changelog.d/5749.misc b/changelog.d/5749.misc new file mode 100644 index 0000000000..48dd61f461 --- /dev/null +++ b/changelog.d/5749.misc @@ -0,0 +1 @@ +Fix some error cases in the caching layer. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 7e69cf55fb..43f66ec4be 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -19,8 +19,7 @@ import logging import threading from collections import namedtuple -import six -from six import itervalues, string_types +from six import itervalues from prometheus_client import Gauge @@ -32,7 +31,6 @@ from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry -from synapse.util.stringutils import to_ascii from . 
import register_cache @@ -124,7 +122,7 @@ class Cache(object): update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either a Deferred or the raw result + Either an ObservableDeferred or the raw result """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) @@ -148,9 +146,14 @@ class Cache(object): return default def set(self, key, value, callback=None): + if not isinstance(value, defer.Deferred): + raise TypeError("not a Deferred") + callbacks = [callback] if callback else [] self.check_thread() - entry = CacheEntry(deferred=value, callbacks=callbacks) + observable = ObservableDeferred(value, consumeErrors=True) + observer = defer.maybeDeferred(observable.observe) + entry = CacheEntry(deferred=observable, callbacks=callbacks) existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: @@ -158,20 +161,31 @@ class Cache(object): self._pending_deferred_cache[key] = entry - def shuffle(result): + def compare_and_pop(): + """Check if our entry is still the one in _pending_deferred_cache, and + if so, pop it. + + Returns true if the entries matched. + """ existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry is entry: + return True + + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. (We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + return False + + def cb(result): + if compare_and_pop(): self.cache.set(key, result, entry.callbacks) else: - # oops, the _pending_deferred_cache has been updated since - # we started our query, so we are out of date. - # - # Better put back whatever we took out. (We do it this way - # round, rather than peeking into the _pending_deferred_cache - # and then removing on a match, to make the common case faster) - if existing_entry is not None: - self._pending_deferred_cache[key] = existing_entry - # we're not going to put this entry into the cache, so need # to make sure that the invalidation callbacks are called. # That was probably done when _pending_deferred_cache was @@ -179,9 +193,16 @@ class Cache(object): # `invalidate` being previously called, in which case it may # not have been. Either way, let's double-check now. entry.invalidate() - return result - entry.deferred.addCallback(shuffle) + def eb(_fail): + compare_and_pop() + entry.invalidate() + + # once the deferred completes, we can move the entry from the + # _pending_deferred_cache to the real cache. + # + observer.addCallbacks(cb, eb) + return observable def prefill(self, key, value, callback=None): callbacks = [callback] if callback else [] @@ -414,20 +435,10 @@ class CacheDescriptor(_CacheDescriptorBase): ret.addErrback(onErr) - # If our cache_key is a string on py2, try to convert to ascii - # to save a bit of space in large caches. Py3 does this - # internally automatically. 
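            # (The net effect of this commit: instead of CacheDescriptor
            # building the ObservableDeferred itself, as the removed lines
            # around here did, cache.set() above now wraps the raw Deferred,
            # attaches the compare_and_pop success/failure callbacks, and
            # returns the observable, so a synchronously-thrown exception is
            # evicted from _pending_deferred_cache rather than being cached
            # as a permanent failure.)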
- if six.PY2 and isinstance(cache_key, string_types): - cache_key = to_ascii(cache_key) - - result_d = ObservableDeferred(ret, consumeErrors=True) - cache.set(cache_key, result_d, callback=invalidate_callback) + result_d = cache.set(cache_key, ret, callback=invalidate_callback) observer = result_d.observe() - if isinstance(observer, defer.Deferred): - return make_deferred_yieldable(observer) - else: - return observer + return make_deferred_yieldable(observer) if self.num_args == 1: wrapped.invalidate = lambda key: cache.invalidate(key[0]) @@ -543,7 +554,7 @@ class CacheListDescriptor(_CacheDescriptorBase): missing.add(arg) if missing: - # we need an observable deferred for each entry in the list, + # we need a deferred for each entry in the list, # which we put in the cache. Each deferred resolves with the # relevant result for that key. deferreds_map = {} @@ -551,8 +562,7 @@ class CacheListDescriptor(_CacheDescriptorBase): deferred = defer.Deferred() deferreds_map[arg] = deferred key = arg_to_cache_key(arg) - observable = ObservableDeferred(deferred) - cache.set(key, observable, callback=invalidate_callback) + cache.set(key, deferred, callback=invalidate_callback) def complete_all(res): # the wrapped function has completed. It returns a diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 56320bbaf9..5713870f48 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,6 +27,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors +from synapse.util.caches.descriptors import cached from tests import unittest @@ -55,12 +56,15 @@ class CacheTestCase(unittest.TestCase): d2 = defer.Deferred() cache.set("key2", d2, partial(record_callback, 1)) - # lookup should return the deferreds - self.assertIs(cache.get("key1"), d1) - self.assertIs(cache.get("key2"), d2) + # lookup should return observable deferreds + self.assertFalse(cache.get("key1").has_called()) + self.assertFalse(cache.get("key2").has_called()) # let one of the lookups complete d2.callback("result2") + + # for now at least, the cache will return real results rather than an + # observabledeferred self.assertEqual(cache.get("key2"), "result2") # now do the invalidation @@ -146,6 +150,28 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): + """If the wrapped function throws synchronously, things should continue to work + """ + + class Cls(object): + @cached() + def fn(self, arg1): + raise SynapseError(100, "mai spoon iz too big!!1") + + obj = Cls() + + # this should fail immediately + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + + # ... 
leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should result in a second exception + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + def test_cache_logcontexts(self): """Check that logcontexts are set and restored correctly when using the cache.""" @@ -222,6 +248,9 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(LoggingContext.current_context(), c1) + # the cache should now be empty + self.assertEqual(len(obj.fn.cache.cache), 0) + obj = Cls() # set off a deferred which will do a cache lookup @@ -268,6 +297,61 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() + def test_cache_iterable(self): + class Cls(object): + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached(iterable=True) + def fn(self, arg1, arg2): + return self.mock(arg1, arg2) + + obj = Cls() + + obj.mock.return_value = ["spam", "eggs"] + r = obj.fn(1, 2) + self.assertEqual(r, ["spam", "eggs"]) + obj.mock.assert_called_once_with(1, 2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = ["chips"] + r = obj.fn(1, 3) + self.assertEqual(r, ["chips"]) + obj.mock.assert_called_once_with(1, 3) + obj.mock.reset_mock() + + # the two values should now be cached + self.assertEqual(len(obj.fn.cache.cache), 3) + + r = obj.fn(1, 2) + self.assertEqual(r, ["spam", "eggs"]) + r = obj.fn(1, 3) + self.assertEqual(r, ["chips"]) + obj.mock.assert_not_called() + + def test_cache_iterable_with_sync_exception(self): + """If the wrapped function throws synchronously, things should continue to work + """ + + class Cls(object): + @descriptors.cached(iterable=True) + def fn(self, arg1): + raise SynapseError(100, "mai spoon iz too big!!1") + + obj = Cls() + + # this should fail immediately + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + + # ... leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should result in a second exception + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks -- cgit 1.5.1 From 1cad8d7b6f736d86bd53b7f5e8b8417c302fdbd1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 26 Jul 2019 07:38:55 +0100 Subject: Convert RedactionTestCase to modern test style (#5768) --- changelog.d/5768.misc | 1 + tests/storage/test_redaction.py | 74 +++++++++++++++++++++-------------------- 2 files changed, 39 insertions(+), 36 deletions(-) create mode 100644 changelog.d/5768.misc (limited to 'tests') diff --git a/changelog.d/5768.misc b/changelog.d/5768.misc new file mode 100644 index 0000000000..7a9c88b4c2 --- /dev/null +++ b/changelog.d/5768.misc @@ -0,0 +1 @@ +Convert RedactionTestCase to modern test style. diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 1cb471205b..8488b6edc8 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,23 +17,21 @@ from mock import Mock -from twisted.internet import defer - from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.types import RoomID, UserID from tests import unittest -from tests.utils import create_room, setup_test_homeserver +from tests.utils import create_room -class RedactionTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - hs = yield setup_test_homeserver( - self.addCleanup, resource_for_federation=Mock(), http_client=None +class RedactionTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + resource_for_federation=Mock(), http_client=None ) + def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() self.event_builder_factory = hs.get_event_builder_factory() self.event_creation_handler = hs.get_event_creation_handler() @@ -42,11 +41,12 @@ class RedactionTestCase(unittest.TestCase): self.room1 = RoomID.from_string("!abc123:test") - yield create_room(hs, self.room1.to_string(), self.u_alice.to_string()) + self.get_success( + create_room(hs, self.room1.to_string(), self.u_alice.to_string()) + ) self.depth = 1 - @defer.inlineCallbacks def inject_room_member( self, room, user, membership, replaces_state=None, extra_content={} ): @@ -63,15 +63,14 @@ class RedactionTestCase(unittest.TestCase): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) return event - @defer.inlineCallbacks def inject_message(self, room, user, body): self.depth += 1 @@ -86,15 +85,14 @@ class RedactionTestCase(unittest.TestCase): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) return event - @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): builder = self.event_builder_factory.for_room_version( RoomVersions.V1, @@ -108,20 +106,21 @@ class RedactionTestCase(unittest.TestCase): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) - @defer.inlineCallbacks def test_redact(self): - yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + ) - msg_event = yield self.inject_message(self.room1, self.u_alice, "t") + msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t")) # Check event has not been redacted: - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertObjectHasAttributes( { @@ -136,11 +135,11 @@ class RedactionTestCase(unittest.TestCase): # Redact event reason = "Because I said so" - yield self.inject_redaction( - self.room1, msg_event.event_id, self.u_alice, reason + self.get_success( + self.inject_redaction(self.room1, 
msg_event.event_id, self.u_alice, reason) ) - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertEqual(msg_event.event_id, event.event_id) @@ -164,15 +163,18 @@ class RedactionTestCase(unittest.TestCase): event.unsigned["redacted_because"], ) - @defer.inlineCallbacks def test_redact_join(self): - yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + ) - msg_event = yield self.inject_room_member( - self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"} + msg_event = self.get_success( + self.inject_room_member( + self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"} + ) ) - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertObjectHasAttributes( { @@ -187,13 +189,13 @@ class RedactionTestCase(unittest.TestCase): # Redact event reason = "Because I said so" - yield self.inject_redaction( - self.room1, msg_event.event_id, self.u_alice, reason + self.get_success( + self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason) ) # Check redaction - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertTrue("redacted_because" in event.unsigned) -- cgit 1.5.1 From 865077f1d1f4866ab874c56b70abbd426fedfb97 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Jul 2019 02:47:27 +1000 Subject: Room Complexity Client Implementation (#5783) --- changelog.d/5783.feature | 1 + docs/sample_config.yaml | 17 +++++++ synapse/config/server.py | 41 ++++++++++++++++ synapse/federation/federation_client.py | 36 ++++++++++++++ synapse/federation/transport/client.py | 31 +++++++++--- synapse/handlers/federation.py | 25 ++++++++++ synapse/handlers/room_member.py | 84 +++++++++++++++++++++++++++++++-- tests/federation/test_complexity.py | 77 ++++++++++++++++++++++++++++-- 8 files changed, 298 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5783.feature (limited to 'tests') diff --git a/changelog.d/5783.feature b/changelog.d/5783.feature new file mode 100644 index 0000000000..18f5a3cb28 --- /dev/null +++ b/changelog.d/5783.feature @@ -0,0 +1 @@ +Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7edf15207a..b92959692d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -278,6 +278,23 @@ listeners: # Used by phonehome stats to group together related servers. #server_context: context +# Resource-constrained Homeserver Settings +# +# If limit_remote_rooms.enabled is True, the room complexity will be +# checked before a user joins a new remote room. If it is above +# limit_remote_rooms.complexity, it will disallow joining or +# instantly leave. +# +# limit_remote_rooms.complexity_error can be set to customise the text +# displayed to the user when a room above the complexity threshold has +# its join cancelled. +# +# Uncomment the below lines to enable: +#limit_remote_rooms: +# enabled: True +# complexity: 1.0 +# complexity_error: "This room is too complex." + # Whether to require a user to be in the room to add an alias to it. # Defaults to 'true'. 
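# (For reference, a filled-in version of the limit_remote_rooms stanza
#  documented above; the values are illustrative examples only, not
#  recommendations:
#
#limit_remote_rooms:
#  enabled: True
#  complexity: 3.0
#  complexity_error: "Ask an administrator before joining rooms this large."
# )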
# diff --git a/synapse/config/server.py b/synapse/config/server.py index 00170f1393..15449695d1 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -18,6 +18,7 @@ import logging import os.path +import attr from netaddr import IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -38,6 +39,12 @@ DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] DEFAULT_ROOM_VERSION = "4" +ROOM_COMPLEXITY_TOO_GREAT = ( + "Your homeserver is unable to join rooms this large or complex. " + "Please speak to your server administrator, or upgrade your instance " + "to join this room." +) + class ServerConfig(Config): def read_config(self, config, **kwargs): @@ -247,6 +254,23 @@ class ServerConfig(Config): self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) + @attr.s + class LimitRemoteRoomsConfig(object): + enabled = attr.ib( + validator=attr.validators.instance_of(bool), default=False + ) + complexity = attr.ib( + validator=attr.validators.instance_of((int, float)), default=1.0 + ) + complexity_error = attr.ib( + validator=attr.validators.instance_of(str), + default=ROOM_COMPLEXITY_TOO_GREAT, + ) + + self.limit_remote_rooms = LimitRemoteRoomsConfig( + **config.get("limit_remote_rooms", {}) + ) + bind_port = config.get("bind_port") if bind_port: if config.get("no_tls", False): @@ -617,6 +641,23 @@ class ServerConfig(Config): # Used by phonehome stats to group together related servers. #server_context: context + # Resource-constrained Homeserver Settings + # + # If limit_remote_rooms.enabled is True, the room complexity will be + # checked before a user joins a new remote room. If it is above + # limit_remote_rooms.complexity, it will disallow joining or + # instantly leave. + # + # limit_remote_rooms.complexity_error can be set to customise the text + # displayed to the user when a room above the complexity threshold has + # its join cancelled. + # + # Uncomment the below lines to enable: + #limit_remote_rooms: + # enabled: True + # complexity: 1.0 + # complexity_error: "This room is too complex." + # Whether to require a user to be in the room to add an alias to it. # Defaults to 'true'. # diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 25ed1257f1..6e03ce21af 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -993,3 +993,39 @@ class FederationClient(FederationBase): ) raise RuntimeError("Failed to send to any server.") + + @defer.inlineCallbacks + def get_room_complexity(self, destination, room_id): + """ + Fetch the complexity of a remote room from another server. + + Args: + destination (str): The remote server + room_id (str): The room ID to ask about. + + Returns: + Deferred[dict] or Deferred[None]: Dict contains the complexity + metric versions, while None means we could not fetch the complexity. + """ + try: + complexity = yield self.transport_layer.get_room_complexity( + destination=destination, room_id=room_id + ) + defer.returnValue(complexity) + except CodeMessageException as e: + # We didn't manage to get it -- probably a 404. We are okay if other + # servers don't give it to us. + logger.debug( + "Failed to fetch room complexity via %s for %s, got a %d", + destination, + room_id, + e.code, + ) + except Exception: + logger.exception( + "Failed to fetch room complexity via %s for %s", destination, room_id + ) + + # If we don't manage to find it, return None. It's not an error if a + # server doesn't give it to us. 
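        # (A successful response is a dict of complexity metric versions,
        # e.g. {"v1": 1.23} as asserted in the tests further down; the None
        # return below just means that no server we asked could, or would,
        # tell us.)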
+ defer.returnValue(None) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2a6709ff48..0cea0d2a10 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -21,7 +21,11 @@ from six.moves import urllib from twisted.internet import defer from synapse.api.constants import Membership -from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX +from synapse.api.urls import ( + FEDERATION_UNSTABLE_PREFIX, + FEDERATION_V1_PREFIX, + FEDERATION_V2_PREFIX, +) from synapse.logging.utils import log_function logger = logging.getLogger(__name__) @@ -935,6 +939,23 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, ignore_backoff=True ) + def get_room_complexity(self, destination, room_id): + """ + Args: + destination (str): The remote server + room_id (str): The room ID to ask about. + """ + path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id) + + return self.client.get_json(destination=destination, path=path) + + +def _create_path(federation_prefix, path, *args): + """ + Ensures that all args are url encoded. + """ + return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) + def _create_v1_path(path, *args): """Creates a path against V1 federation API from the path template and @@ -951,9 +972,7 @@ def _create_v1_path(path, *args): Returns: str """ - return FEDERATION_V1_PREFIX + path % tuple( - urllib.parse.quote(arg, "") for arg in args - ) + return _create_path(FEDERATION_V1_PREFIX, path, *args) def _create_v2_path(path, *args): @@ -971,6 +990,4 @@ def _create_v2_path(path, *args): Returns: str """ - return FEDERATION_V2_PREFIX + path % tuple( - urllib.parse.quote(arg, "") for arg in args - ) + return _create_path(FEDERATION_V2_PREFIX, path, *args) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 89b37dbc1c..10160bfe86 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2796,3 +2796,28 @@ class FederationHandler(BaseHandler): ) else: return user_joined_room(self.distributor, user, room_id) + + @defer.inlineCallbacks + def get_room_complexity(self, remote_room_hosts, room_id): + """ + Fetch the complexity of a remote room over federation. + + Args: + remote_room_hosts (list[str]): The remote servers to ask. + room_id (str): The room ID to ask about. + + Returns: + Deferred[dict] or Deferred[None]: Dict contains the complexity + metric versions, while None means we could not fetch the complexity. + """ + + for host in remote_room_hosts: + res = yield self.federation_client.get_room_complexity(host, room_id) + + # We got a result, return it. + if res: + defer.returnValue(res) + + # We fell off the bottom, couldn't get the complexity from anyone. Oh + # well. 
+ defer.returnValue(None) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index baea08ddd0..249a6d9c5d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -26,8 +26,7 @@ from unpaddedbase64 import decode_base64 from twisted.internet import defer -import synapse.server -import synapse.types +from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError from synapse.types import RoomID, UserID @@ -543,7 +542,7 @@ class RoomMemberHandler(object): ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) else: - requester = synapse.types.create_requester(target_user) + requester = types.create_requester(target_user) prev_event = yield self.event_creation_handler.deduplicate_state_event( event, context @@ -945,6 +944,47 @@ class RoomMemberMasterHandler(RoomMemberHandler): self.distributor.declare("user_joined_room") self.distributor.declare("user_left_room") + @defer.inlineCallbacks + def _is_remote_room_too_complex(self, room_id, remote_room_hosts): + """ + Check if complexity of a remote room is too great. + + Args: + room_id (str) + remote_room_hosts (list[str]) + + Returns: bool of whether the complexity is too great, or None + if unable to be fetched + """ + max_complexity = self.hs.config.limit_remote_rooms.complexity + complexity = yield self.federation_handler.get_room_complexity( + remote_room_hosts, room_id + ) + + if complexity: + if complexity["v1"] > max_complexity: + return True + return False + return None + + @defer.inlineCallbacks + def _is_local_room_too_complex(self, room_id): + """ + Check if the complexity of a local room is too great. + + Args: + room_id (str) + + Returns: bool + """ + max_complexity = self.hs.config.limit_remote_rooms.complexity + complexity = yield self.store.get_room_complexity(room_id) + + if complexity["v1"] > max_complexity: + return True + + return False + @defer.inlineCallbacks def _remote_join(self, requester, remote_room_hosts, room_id, user, content): """Implements RoomMemberHandler._remote_join @@ -952,7 +992,6 @@ class RoomMemberMasterHandler(RoomMemberHandler): # filter ourselves out of remote_room_hosts: do_invite_join ignores it # and if it is the only entry we'd like to return a 404 rather than a # 500. - remote_room_hosts = [ host for host in remote_room_hosts if host != self.hs.hostname ] @@ -960,6 +999,18 @@ class RoomMemberMasterHandler(RoomMemberHandler): if len(remote_room_hosts) == 0: raise SynapseError(404, "No known servers") + if self.hs.config.limit_remote_rooms.enabled: + # Fetch the room complexity + too_complex = yield self._is_remote_room_too_complex( + room_id, remote_room_hosts + ) + if too_complex is True: + raise SynapseError( + code=400, + msg=self.hs.config.limit_remote_rooms.complexity_error, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, + ) + # We don't do an auth check if we are doing an invite # join dance for now, since we're kinda implicitly checking # that we are allowed to join when we decide whether or not we @@ -969,6 +1020,31 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) yield self._user_joined_room(user, room_id) + # Check the room we just joined wasn't too large, if we didn't fetch the + # complexity of it before. + if self.hs.config.limit_remote_rooms.enabled: + if too_complex is False: + # We checked, and we're under the limit. 
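            # (too_complex is tri-state at this point: True was rejected
            # before the join ever happened, False means the remote check
            # passed, and only None, where no remote server answered, falls
            # through to the local re-check and possible forced leave below.)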
+ return + + # Check again, but with the local state events + too_complex = yield self._is_local_room_too_complex(room_id) + + if too_complex is False: + # We're under the limit. + return + + # The room is too large. Leave. + requester = types.create_requester(user, None, False, None) + yield self.update_membership( + requester=requester, target=user, room_id=room_id, action="leave" + ) + raise SynapseError( + code=400, + msg=self.hs.config.limit_remote_rooms.complexity_error, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, + ) + @defer.inlineCallbacks def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Implements RoomMemberHandler._remote_reject_invite diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index a5b03005d7..51714a2b06 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -13,12 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + from twisted.internet import defer +from synapse.api.errors import Codes, SynapseError from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.federation.transport import server from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import UserID from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest @@ -33,9 +37,8 @@ class RoomComplexityTests(unittest.HomeserverTestCase): ] def default_config(self, name="test"): - config = super(RoomComplexityTests, self).default_config(name=name) - config["limit_large_remote_room_joins"] = True - config["limit_large_remote_room_complexity"] = 0.05 + config = super().default_config(name=name) + config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05} return config def prepare(self, reactor, clock, homeserver): @@ -88,3 +91,71 @@ class RoomComplexityTests(unittest.HomeserverTestCase): self.assertEquals(200, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) + + def test_join_too_large(self): + + u1 = self.register_user("u1", "pass") + + handler = self.hs.get_room_member_handler() + fed_transport = self.hs.get_federation_transport_client() + + # Mock out some things, because we don't want to test the whole join + fed_transport.client.get_json = Mock(return_value=defer.succeed({"v1": 9999})) + handler.federation_handler.do_invite_join = Mock(return_value=defer.succeed(1)) + + d = handler._remote_join( + None, + ["otherserver.example"], + "roomid", + UserID.from_string(u1), + {"membership": "join"}, + ) + + self.pump() + + # The request failed with a SynapseError saying the resource limit was + # exceeded. + f = self.get_failure(d, SynapseError) + self.assertEqual(f.value.code, 400, f.value) + self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + + def test_join_too_large_once_joined(self): + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + # Ok, this might seem a bit weird -- I want to test that we actually + # leave the room, but I don't want to simulate two servers. So, we make + # a local room, which we say we're joining remotely, even if there's no + # remote, because we mock that out. Then, we'll leave the (actually + # local) room, which will be propagated over federation in a real + # scenario. 
+ room_1 = self.helper.create_room_as(u1, tok=u1_token) + + handler = self.hs.get_room_member_handler() + fed_transport = self.hs.get_federation_transport_client() + + # Mock out some things, because we don't want to test the whole join + fed_transport.client.get_json = Mock(return_value=defer.succeed(None)) + handler.federation_handler.do_invite_join = Mock(return_value=defer.succeed(1)) + + # Artificially raise the complexity + self.hs.get_datastore().get_current_state_event_counts = lambda x: defer.succeed( + 600 + ) + + d = handler._remote_join( + None, + ["otherserver.example"], + room_1, + UserID.from_string(u1), + {"membership": "join"}, + ) + + self.pump() + + # The request failed with a SynapseError saying the resource limit was + # exceeded. + f = self.get_failure(d, SynapseError) + self.assertEqual(f.value.code, 400) + self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) -- cgit 1.5.1 From 4e97eb89e5ed4517e5967a49acf6db987bb96d51 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Jul 2019 22:45:35 +0100 Subject: Handle loops in redaction events --- synapse/storage/events_worker.py | 96 +++++++++++++++------------------------- tests/storage/test_redaction.py | 70 +++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 60 deletions(-) (limited to 'tests') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index e15e7d86fe..c6fa7f82fd 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -483,7 +483,8 @@ class EventsWorkerStore(SQLBaseStore): if events_to_fetch: logger.debug("Also fetching redaction events %s", events_to_fetch) - result_map = {} + # build a map from event_id to EventBase + event_map = {} for event_id, row in fetched_events.items(): if not row: continue @@ -494,14 +495,37 @@ class EventsWorkerStore(SQLBaseStore): if not allow_rejected and rejected_reason: continue - cache_entry = yield self._get_event_from_row( - row["internal_metadata"], - row["json"], - row["redactions"], - rejected_reason=row["rejected_reason"], - format_version=row["format_version"], + d = json.loads(row["json"]) + internal_metadata = json.loads(row["internal_metadata"]) + + format_version = row["format_version"] + if format_version is None: + # This means that we stored the event before we had the concept + # of a event format version, so it must be a V1 event. + format_version = EventFormatVersions.V1 + + original_ev = event_type_from_format_version(format_version)( + event_dict=d, + internal_metadata_dict=internal_metadata, + rejected_reason=rejected_reason, ) + event_map[event_id] = original_ev + + # finally, we can decide whether each one nededs redacting, and build + # the cache entries. 
+ result_map = {} + for event_id, original_ev in event_map.items(): + redactions = fetched_events[event_id]["redactions"] + redacted_event = self._maybe_redact_event_row( + original_ev, redactions, event_map + ) + + cache_entry = _EventCacheEntry( + event=original_ev, redacted_event=redacted_event + ) + + self._get_event_cache.prefill((event_id,), cache_entry) result_map[event_id] = cache_entry return result_map @@ -615,50 +639,7 @@ class EventsWorkerStore(SQLBaseStore): return event_dict - @defer.inlineCallbacks - def _get_event_from_row( - self, internal_metadata, js, redactions, format_version, rejected_reason=None - ): - """Parse an event row which has been read from the database - - Args: - internal_metadata (str): json-encoded internal_metadata column - js (str): json-encoded event body from event_json - redactions (list[str]): a list of the events which claim to have redacted - this event, from the redactions table - format_version: (str): the 'format_version' column - rejected_reason (str|None): the reason this event was rejected, if any - - Returns: - _EventCacheEntry - """ - with Measure(self._clock, "_get_event_from_row"): - d = json.loads(js) - internal_metadata = json.loads(internal_metadata) - - if format_version is None: - # This means that we stored the event before we had the concept - # of a event format version, so it must be a V1 event. - format_version = EventFormatVersions.V1 - - original_ev = event_type_from_format_version(format_version)( - event_dict=d, - internal_metadata_dict=internal_metadata, - rejected_reason=rejected_reason, - ) - - redacted_event = yield self._maybe_redact_event_row(original_ev, redactions) - - cache_entry = _EventCacheEntry( - event=original_ev, redacted_event=redacted_event - ) - - self._get_event_cache.prefill((original_ev.event_id,), cache_entry) - - return cache_entry - - @defer.inlineCallbacks - def _maybe_redact_event_row(self, original_ev, redactions): + def _maybe_redact_event_row(self, original_ev, redactions, event_map): """Given an event object and a list of possible redacting event ids, determine whether to honour any of those redactions and if so return a redacted event. @@ -666,6 +647,8 @@ class EventsWorkerStore(SQLBaseStore): Args: original_ev (EventBase): redactions (iterable[str]): list of event ids of potential redaction events + event_map (dict[str, EventBase]): other events which have been fetched, in + which we can look up the redaction events. Map from event id to event. Returns: Deferred[EventBase|None]: if the event should be redacted, a pruned @@ -675,15 +658,9 @@ class EventsWorkerStore(SQLBaseStore): # we choose to ignore redactions of m.room.create events. return None - if original_ev.type == "m.room.redaction": - # ... and redaction events - return None - - redaction_map = yield self._get_events_from_cache_or_db(redactions) - for redaction_id in redactions: - redaction_entry = redaction_map.get(redaction_id) - if not redaction_entry: + redaction_event = event_map.get(redaction_id) + if not redaction_event or redaction_event.rejected_reason: # we don't have the redaction event, or the redaction event was not # authorized.
logger.debug( @@ -693,7 +670,6 @@ class EventsWorkerStore(SQLBaseStore): ) continue - redaction_event = redaction_entry.event if redaction_event.room_id != original_ev.room_id: logger.debug( "%s was redacted by %s but redaction was in a different room!", diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 8488b6edc8..d961b81d48 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -17,6 +17,8 @@ from mock import Mock +from twisted.internet import defer + from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.types import RoomID, UserID @@ -216,3 +218,71 @@ class RedactionTestCase(unittest.HomeserverTestCase): }, event.unsigned["redacted_because"], ) + + def test_circular_redaction(self): + redaction_event_id1 = "$redaction1_id:test" + redaction_event_id2 = "$redaction2_id:test" + + class EventIdManglingBuilder: + def __init__(self, base_builder, event_id): + self._base_builder = base_builder + self._event_id = event_id + + @defer.inlineCallbacks + def build(self, prev_event_ids): + built_event = yield self._base_builder.build(prev_event_ids) + built_event.event_id = self._event_id + built_event._event_dict["event_id"] = self._event_id + return built_event + + @property + def room_id(self): + return self._base_builder.room_id + + event_1, context_1 = self.get_success( + self.event_creation_handler.create_new_client_event( + EventIdManglingBuilder( + self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Redaction, + "sender": self.u_alice.to_string(), + "room_id": self.room1.to_string(), + "content": {"reason": "test"}, + "redacts": redaction_event_id2, + }, + ), + redaction_event_id1, + ) + ) + ) + + self.get_success(self.store.persist_event(event_1, context_1)) + + event_2, context_2 = self.get_success( + self.event_creation_handler.create_new_client_event( + EventIdManglingBuilder( + self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Redaction, + "sender": self.u_alice.to_string(), + "room_id": self.room1.to_string(), + "content": {"reason": "test"}, + "redacts": redaction_event_id1, + }, + ), + redaction_event_id2, + ) + ) + ) + self.get_success(self.store.persist_event(event_2, context_2)) + + # fetch one of the redactions + fetched = self.get_success(self.store.get_event(redaction_event_id1)) + + # it should have been redacted + self.assertEqual(fetched.unsigned["redacted_by"], redaction_event_id2) + self.assertEqual( + fetched.unsigned["redacted_because"].event_id, redaction_event_id2 + ) -- cgit 1.5.1 From 8c97f6414cf322fc5b42a92ed0df2fb70bfab3fc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 30 Jul 2019 08:25:02 +0100 Subject: Remove non-functional 'expire_access_token' setting (#5782) The `expire_access_token` didn't do what it sounded like it should do. What it actually did was make Synapse enforce the 'time' caveat on macaroons used as access tokens, but since our access token macaroons never contained such a caveat, it was always a no-op. 
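To make the no-op concrete: enforcing a 'time' caveat only has an effect on a macaroon that actually carries one. Below is a standalone sketch of the behaviour described above (illustrative only, not Synapse code; the key and caveat values are invented):

    import time

    import pymacaroons

    key = "some_secret"
    macaroon = pymacaroons.Macaroon(location="test", identifier="key1", key=key)
    macaroon.add_first_party_caveat("gen = 1")
    # crucially, no "time < ..." caveat is ever added, mirroring the access
    # tokens Synapse actually issued

    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    # "enforce" expiry: this predicate is only consulted for caveats that are
    # present, so with no 'time' caveat it never runs, and verification
    # succeeds regardless of the clock
    v.satisfy_general(
        lambda caveat: caveat.startswith("time < ")
        and int(caveat[len("time < "):]) > time.time() * 1000
    )
    assert v.verify(macaroon, key)

After this change, any 'time' caveat that is present (e.g. on short-term login tokens) is always enforced, rather than conditionally ignored.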
(The code to add 'time' caveats was removed back in v0.18.5, in #1656) --- changelog.d/5782.removal | 1 + docs/sample_config.yaml | 4 ---- synapse/api/auth.py | 28 ++++------------------ synapse/config/key.py | 6 ----- synapse/handlers/auth.py | 2 +- tests/handlers/test_register.py | 2 +- .../test_resource_limits_server_notices.py | 2 +- tests/utils.py | 1 - 8 files changed, 9 insertions(+), 37 deletions(-) create mode 100644 changelog.d/5782.removal (limited to 'tests') diff --git a/changelog.d/5782.removal b/changelog.d/5782.removal new file mode 100644 index 0000000000..658bf923ab --- /dev/null +++ b/changelog.d/5782.removal @@ -0,0 +1 @@ +Remove non-functional 'expire_access_token' setting. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b92959692d..08316597fa 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -942,10 +942,6 @@ uploads_path: "DATADIR/uploads" # # macaroon_secret_key: -# Used to enable access token expiration. -# -#expire_access_token: False - # a secret which is used to calculate HMACs for form values, to stop # falsification of values. Must be specified for the User Consent # forms to work. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 351790cca4..179644852a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -410,21 +410,16 @@ class Auth(object): try: user_id = self.get_user_id_from_macaroon(macaroon) - has_expiry = False guest = False for caveat in macaroon.caveats: - if caveat.caveat_id.startswith("time "): - has_expiry = True - elif caveat.caveat_id == "guest = true": + if caveat.caveat_id == "guest = true": guest = True - self.validate_macaroon( - macaroon, rights, self.hs.config.expire_access_token, user_id=user_id - ) + self.validate_macaroon(macaroon, rights, user_id=user_id) except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError): raise InvalidClientTokenError("Invalid macaroon passed.") - if not has_expiry and rights == "access": + if rights == "access": self.token_cache[token] = (user_id, guest) return user_id, guest @@ -450,7 +445,7 @@ class Auth(object): return caveat.caveat_id[len(user_prefix) :] raise InvalidClientTokenError("No user caveat in macaroon") - def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): + def validate_macaroon(self, macaroon, type_string, user_id): """ validate that a Macaroon is understood by and was signed by this server. @@ -458,7 +453,6 @@ class Auth(object): macaroon(pymacaroons.Macaroon): The macaroon to validate type_string(str): The kind of token required (e.g. "access", "delete_pusher") - verify_expiry(bool): Whether to verify whether the macaroon has expired. user_id (str): The user_id required """ v = pymacaroons.Verifier() @@ -471,19 +465,7 @@ class Auth(object): v.satisfy_exact("type = " + type_string) v.satisfy_exact("user_id = %s" % user_id) v.satisfy_exact("guest = true") - - # verify_expiry should really always be True, but there exist access - # tokens in the wild which expire when they should not, so we can't - # enforce expiry yet (so we have to allow any caveat starting with - # 'time < ' in access tokens). - # - # On the other hand, short-term login tokens (as used by CAS login, for - # example) have an expiry time which we do want to enforce. 
- - if verify_expiry: - v.satisfy_general(self._verify_expiry) - else: - v.satisfy_general(lambda c: c.startswith("time < ")) + v.satisfy_general(self._verify_expiry) # access_tokens include a nonce for uniqueness: any value is acceptable v.satisfy_general(lambda c: c.startswith("nonce = ")) diff --git a/synapse/config/key.py b/synapse/config/key.py index 8fc74f9cdf..fe8386985c 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -116,8 +116,6 @@ class KeyConfig(Config): seed = bytes(self.signing_key[0]) self.macaroon_secret_key = hashlib.sha256(seed).digest() - self.expire_access_token = config.get("expire_access_token", False) - # a secret which is used to calculate HMACs for form values, to stop # falsification of values self.form_secret = config.get("form_secret", None) @@ -144,10 +142,6 @@ class KeyConfig(Config): # %(macaroon_secret_key)s - # Used to enable access token expiration. - # - #expire_access_token: False - # a secret which is used to calculate HMACs for form values, to stop # falsification of values. Must be specified for the User Consent # forms to work. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 05be5b7c48..0f3ebf7ef8 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -860,7 +860,7 @@ class AuthHandler(BaseHandler): try: macaroon = pymacaroons.Macaroon.deserialize(login_token) user_id = auth_api.get_user_id_from_macaroon(macaroon) - auth_api.validate_macaroon(macaroon, "login", True, user_id) + auth_api.validate_macaroon(macaroon, "login", user_id) except Exception: raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) self.ratelimit_login_per_account(user_id) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 99dce45cfe..0ad0a88165 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -44,7 +44,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): hs_config["max_mau_value"] = 50 hs_config["limit_usage_by_mau"] = True - hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) + hs = self.setup_test_homeserver(config=hs_config) return hs def prepare(self, reactor, clock, hs): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 984feb623f..cdf89e3383 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -36,7 +36,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): "room_name": "Server Notices", } - hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) + hs = self.setup_test_homeserver(config=hs_config) return hs def prepare(self, reactor, clock, hs): diff --git a/tests/utils.py b/tests/utils.py index 6350646263..f1eb9a545c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -126,7 +126,6 @@ def default_config(name, parse=False): "enable_registration": True, "enable_registration_captcha": False, "macaroon_secret_key": "not even a little secret", - "expire_access_token": False, "trusted_third_party_id_servers": [], "room_invite_state_types": [], "password_providers": [], -- cgit 1.5.1 From a9bcae9f50a14dc020634c532732c1a86b696d92 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2019 17:42:56 +0100 Subject: Share SSL options for well-known requests --- synapse/crypto/context_factory.py | 8 ++++++++ synapse/http/federation/matrix_federation_agent.py | 16 +++++----------- 
tests/http/federation/test_matrix_federation_agent.py | 12 ++++++------ 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'tests') diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 4f48e8e88d..06e63a96b5 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -31,6 +31,7 @@ from twisted.internet.ssl import ( platformTrust, ) from twisted.python.failure import Failure +from twisted.web.iweb import IPolicyForHTTPS logger = logging.getLogger(__name__) @@ -74,6 +75,7 @@ class ServerContextFactory(ContextFactory): return self._context +@implementer(IPolicyForHTTPS) class ClientTLSOptionsFactory(object): """Factory for Twisted SSLClientConnectionCreators that are used to make connections to remote servers for federation. @@ -146,6 +148,12 @@ class ClientTLSOptionsFactory(object): f = Failure() tls_protocol.failVerification(f) + def creatorForNetloc(self, hostname, port): + """Implements the IPolicyForHTTPS interface so that this can be passed + directly to agents. + """ + return self.get_options(hostname) + @implementer(IOpenSSLClientConnectionCreator) class SSLClientConnectionCreator(object): diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index c03ddb724f..a0d5139839 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -64,10 +64,6 @@ class MatrixFederationAgent(object): tls_client_options_factory (ClientTLSOptionsFactory|None): factory to use for fetching client tls options, or none to disable TLS. - _well_known_tls_policy (IPolicyForHTTPS|None): - TLS policy to use for fetching .well-known files. None to use a default - (browser-like) implementation. - _srv_resolver (SrvResolver|None): SRVResolver impl to use for looking up SRV records. None to use a default implementation. @@ -81,7 +77,6 @@ class MatrixFederationAgent(object): self, reactor, tls_client_options_factory, - _well_known_tls_policy=None, _srv_resolver=None, _well_known_cache=well_known_cache, ): @@ -98,13 +93,12 @@ class MatrixFederationAgent(object): self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 - agent_args = {} - if _well_known_tls_policy is not None: - # the param is called 'contextFactory', but actually passing a - # contextfactory is deprecated, and it expects an IPolicyForHTTPS.
- agent_args["contextFactory"] = _well_known_tls_policy _well_known_agent = RedirectAgent( - Agent(self._reactor, pool=self._pool, **agent_args) + Agent( + self._reactor, + pool=self._pool, + contextFactory=tls_client_options_factory, + ) ) self._well_known_agent = _well_known_agent diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index b906686b49..4255add097 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -75,7 +75,6 @@ class MatrixFederationAgentTests(TestCase): config_dict = default_config("test", parse=False) config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()] - # config_dict["trusted_key_servers"] = [] self._config = config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") @@ -83,7 +82,6 @@ class MatrixFederationAgentTests(TestCase): self.agent = MatrixFederationAgent( reactor=self.reactor, tls_client_options_factory=ClientTLSOptionsFactory(config), - _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, ) @@ -691,16 +689,18 @@ class MatrixFederationAgentTests(TestCase): not signed by a CA """ - # we use the same test server as the other tests, but use an agent - # with _well_known_tls_policy left to the default, which will not - # trust it (since the presented cert is signed by a test CA) + # we use the same test server as the other tests, but use an agent with + # the config left to the default, which will not trust it (since the + # presented cert is signed by a test CA) self.mock_resolver.resolve_service.side_effect = lambda _: [] self.reactor.lookups["testserv"] = "1.2.3.4" + config = default_config("test", parse=True) + agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(self._config), + tls_client_options_factory=ClientTLSOptionsFactory(config), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, ) -- cgit 1.5.1 From 8f15832950148280ed5a9cf28b74971abfd09e4f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Jul 2019 20:39:22 +1000 Subject: Remove DelayedCall debugging from test runs (#5787) --- changelog.d/5787.misc | 1 + tests/unittest.py | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 changelog.d/5787.misc (limited to 'tests') diff --git a/changelog.d/5787.misc b/changelog.d/5787.misc new file mode 100644 index 0000000000..ead0b04b62 --- /dev/null +++ b/changelog.d/5787.misc @@ -0,0 +1 @@ +Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests. diff --git a/tests/unittest.py b/tests/unittest.py index f5fae21317..561cebc223 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -23,8 +23,6 @@ from mock import Mock from canonicaljson import json -import twisted -import twisted.logger from twisted.internet.defer import Deferred, succeed from twisted.python.threadpool import ThreadPool from twisted.trial import unittest @@ -80,10 +78,6 @@ class TestCase(unittest.TestCase): @around(self) def setUp(orig): - # enable debugging of delayed calls - this means that we get a - # traceback when a unit test exits leaving things on the reactor. - twisted.internet.base.DelayedCall.debug = True - # if we're not starting in the sentinel logcontext, then to be honest # all future bets are off. 
if LoggingContext.current_context() is not LoggingContext.sentinel: -- cgit 1.5.1 From bc3550352830bef2c484dd5d3dd5ff1fce29a6fc Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 1 Aug 2019 12:00:08 +0200 Subject: Add tests --- tests/rest/client/v2_alpha/test_register.py | 37 +++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'tests') diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 89a3f95c0a..bb867150f4 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -323,6 +323,8 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "renew_at": 172800000, # Time in ms for 2 days "renew_by_email_enabled": True, "renew_email_subject": "Renew your account", + "account_renewed_html_path": "account_renewed.html", + "invalid_token_html_path": "invalid_token.html", } # Email config. @@ -373,6 +375,19 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + # Check that we're getting HTML back. + content_type = None + for header in channel.result.get("headers", []): + if header[0] == b"Content-Type": + content_type = header[1] + self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + + # Check that the HTML we're getting is the one we expect on a successful renewal. + expected_html = self.hs.config.account_validity.account_renewed_html_content + self.assertEqual( + channel.result["body"], expected_html.encode("utf8"), channel.result + ) + # Move 3 days forward. If the renewal failed, every authed request with # our access token should be denied from now, otherwise they should # succeed. @@ -381,6 +396,28 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + def test_renewal_invalid_token(self): + # Hit the renewal endpoint with an invalid token and check that it behaves as + # expected, i.e. that it responds with 404 Not Found and the correct HTML. + url = "/_matrix/client/unstable/account_validity/renew?token=123" + request, channel = self.make_request(b"GET", url) + self.render(request) + self.assertEquals(channel.result["code"], b"404", channel.result) + + # Check that we're getting HTML back. + content_type = None + for header in channel.result.get("headers", []): + if header[0] == b"Content-Type": + content_type = header[1] + self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + + # Check that the HTML we're getting is the one we expect when using an + # invalid/unknown token. + expected_html = self.hs.config.account_validity.invalid_token_html_content + self.assertEqual( + channel.result["body"], expected_html.encode("utf8"), channel.result + ) + def test_manual_email_send(self): self.email_attempts = [] -- cgit 1.5.1 From af9f1c07646031bf267aa20f2629d5b96c6603b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Aug 2019 16:27:46 +0100 Subject: Add a lower bound for TTL on well known results. It costs both us and the remote server for us to fetch the well known for every single request we send, so we add a minimum cache period. This is set to 5m so that we still honour the basic premise of "refetch frequently". 
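Concretely, the clamp below applies when the remote supplied an explicit TTL; a minimal standalone sketch of the arithmetic (clamp_cache_period is a hypothetical name, the constants are the ones added in the diff):

    WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600  # existing cap: 48 hours
    WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60  # new lower bound: 5 minutes

    def clamp_cache_period(cache_period):
        # honour the TTL the remote server asked for, up to the cap...
        cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
        # ...but never cache for less than the lower bound
        return max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)

    assert clamp_cache_period(10) == 300  # a 10s TTL is raised to 5 minutes
    assert clamp_cache_period(3600) == 3600  # a 1h TTL is honoured as-is
    assert clamp_cache_period(10 ** 9) == 172800  # huge TTLs stay capped at 48h

This is also why the test below moves from max-age=10 to max-age=1000: a 10-second TTL would now be silently raised to the 5-minute floor, so the old expiry timing would no longer hold.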
--- synapse/http/federation/matrix_federation_agent.py | 4 ++++ tests/http/federation/test_matrix_federation_agent.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index a0d5139839..79e488c942 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -47,6 +47,9 @@ WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 # cap for .well-known cache period WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600 +# lower bound for .well-known cache period +WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60 + logger = logging.getLogger(__name__) well_known_cache = TTLCache("well-known") @@ -356,6 +359,7 @@ class MatrixFederationAgent(object): cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) else: cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) + cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) return (result, cache_period) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 4255add097..5e709c0c17 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -953,7 +953,7 @@ class MatrixFederationAgentTests(TestCase): well_known_server = self._handle_well_known_connection( client_factory, expected_sni=b"testserv", - response_headers={b"Cache-Control": b"max-age=10"}, + response_headers={b"Cache-Control": b"max-age=1000"}, content=b'{ "m.server": "target-server" }', ) @@ -969,7 +969,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(r, b"target-server") # expire the cache - self.reactor.pump((10.0,)) + self.reactor.pump((1000.0,)) # now it should connect again fetch_d = self.do_get_well_known(b"testserv") -- cgit 1.5.1 From 107ad133fcf5b5a87e2aa1d53ab415aa39a01266 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Aug 2019 15:36:38 +0100 Subject: Move well known lookup into a separate class --- synapse/http/federation/matrix_federation_agent.py | 166 ++----------------- synapse/http/federation/well_known_resolver.py | 184 +++++++++++++++++++++ .../federation/test_matrix_federation_agent.py | 39 +++-- 3 files changed, 216 insertions(+), 173 deletions(-) create mode 100644 synapse/http/federation/well_known_resolver.py (limited to 'tests') diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 79e488c942..71a15f434d 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -12,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-import json + import logging -import random -import time import attr from netaddr import IPAddress @@ -24,34 +22,16 @@ from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.internet.interfaces import IStreamClientEndpoint -from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody -from twisted.web.http import stringToDatetime +from twisted.web.client import URI, Agent, HTTPConnectionPool from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list +from synapse.http.federation.well_known_resolver import WellKnownResolver from synapse.logging.context import make_deferred_yieldable from synapse.util import Clock -from synapse.util.caches.ttlcache import TTLCache -from synapse.util.metrics import Measure - -# period to cache .well-known results for by default -WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600 - -# jitter to add to the .well-known default cache ttl -WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60 - -# period to cache failure to fetch .well-known for -WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 - -# cap for .well-known cache period -WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600 - -# lower bound for .well-known cache period -WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60 logger = logging.getLogger(__name__) -well_known_cache = TTLCache("well-known") @implementer(IAgent) @@ -81,7 +61,7 @@ class MatrixFederationAgent(object): reactor, tls_client_options_factory, _srv_resolver=None, - _well_known_cache=well_known_cache, + _well_known_cache=None, ): self._reactor = reactor self._clock = Clock(reactor) @@ -96,20 +76,15 @@ class MatrixFederationAgent(object): self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 - _well_known_agent = RedirectAgent( - Agent( + self._well_known_resolver = WellKnownResolver( + self._reactor, + agent=Agent( self._reactor, pool=self._pool, contextFactory=tls_client_options_factory, - ) + ), + well_known_cache=_well_known_cache, ) - self._well_known_agent = _well_known_agent - - # our cache of .well-known lookup results, mapping from server name - # to delegated name. The values can be: - # `bytes`: a valid server-name - # `None`: there is no (valid) .well-known here - self._well_known_cache = _well_known_cache @defer.inlineCallbacks def request(self, method, uri, headers=None, bodyProducer=None): @@ -220,7 +195,10 @@ class MatrixFederationAgent(object): if lookup_well_known: # try a .well-known lookup - well_known_server = yield self._get_well_known(parsed_uri.host) + well_known_result = yield self._well_known_resolver.get_well_known( + parsed_uri.host + ) + well_known_server = well_known_result.delegated_server if well_known_server: # if we found a .well-known, start again, but don't do another @@ -283,86 +261,6 @@ class MatrixFederationAgent(object): target_port=port, ) - @defer.inlineCallbacks - def _get_well_known(self, server_name): - """Attempt to fetch and parse a .well-known file for the given server - - Args: - server_name (bytes): name of the server, from the requested url - - Returns: - Deferred[bytes|None]: either the new server name, from the .well-known, or - None if there was no .well-known file. - """ - try: - result = self._well_known_cache[server_name] - except KeyError: - # TODO: should we linearise so that we don't end up doing two .well-known - # requests for the same server in parallel? 
- with Measure(self._clock, "get_well_known"): - result, cache_period = yield self._do_get_well_known(server_name) - - if cache_period > 0: - self._well_known_cache.set(server_name, result, cache_period) - - return result - - @defer.inlineCallbacks - def _do_get_well_known(self, server_name): - """Actually fetch and parse a .well-known, without checking the cache - - Args: - server_name (bytes): name of the server, from the requested url - - Returns: - Deferred[Tuple[bytes|None|object],int]: - result, cache period, where result is one of: - - the new server name from the .well-known (as a `bytes`) - - None if there was no .well-known file. - - INVALID_WELL_KNOWN if the .well-known was invalid - """ - uri = b"https://%s/.well-known/matrix/server" % (server_name,) - uri_str = uri.decode("ascii") - logger.info("Fetching %s", uri_str) - try: - response = yield make_deferred_yieldable( - self._well_known_agent.request(b"GET", uri) - ) - body = yield make_deferred_yieldable(readBody(response)) - if response.code != 200: - raise Exception("Non-200 response %s" % (response.code,)) - - parsed_body = json.loads(body.decode("utf-8")) - logger.info("Response from .well-known: %s", parsed_body) - if not isinstance(parsed_body, dict): - raise Exception("not a dict") - if "m.server" not in parsed_body: - raise Exception("Missing key 'm.server'") - except Exception as e: - logger.info("Error fetching %s: %s", uri_str, e) - - # add some randomness to the TTL to avoid a stampeding herd every hour - # after startup - cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - return (None, cache_period) - - result = parsed_body["m.server"].encode("ascii") - - cache_period = _cache_period_from_headers( - response.headers, time_now=self._reactor.seconds - ) - if cache_period is None: - cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD - # add some randomness to the TTL to avoid a stampeding herd every 24 hours - # after startup - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - else: - cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) - cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) - - return (result, cache_period) - @implementer(IStreamClientEndpoint) class LoggingHostnameEndpoint(object): @@ -378,44 +276,6 @@ class LoggingHostnameEndpoint(object): return self.ep.connect(protocol_factory) -def _cache_period_from_headers(headers, time_now=time.time): - cache_controls = _parse_cache_control(headers) - - if b"no-store" in cache_controls: - return 0 - - if b"max-age" in cache_controls: - try: - max_age = int(cache_controls[b"max-age"]) - return max_age - except ValueError: - pass - - expires = headers.getRawHeaders(b"expires") - if expires is not None: - try: - expires_date = stringToDatetime(expires[-1]) - return expires_date - time_now() - except ValueError: - # RFC7234 says 'A cache recipient MUST interpret invalid date formats, - # especially the value "0", as representing a time in the past (i.e., - # "already expired"). - return 0 - - return None - - -def _parse_cache_control(headers): - cache_controls = {} - for hdr in headers.getRawHeaders(b"cache-control", []): - for directive in hdr.split(b","): - splits = [x.strip() for x in directive.split(b"=", 1)] - k = splits[0].lower() - v = splits[1] if len(splits) > 1 else None - cache_controls[k] = v - return cache_controls - - @attr.s class _RoutingResult(object): """The result returned by `_route_matrix_uri`. 
diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py new file mode 100644 index 0000000000..bab4ab015e --- /dev/null +++ b/synapse/http/federation/well_known_resolver.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +import random +import time + +import attr + +from twisted.internet import defer +from twisted.web.client import RedirectAgent, readBody +from twisted.web.http import stringToDatetime + +from synapse.logging.context import make_deferred_yieldable +from synapse.util import Clock +from synapse.util.caches.ttlcache import TTLCache +from synapse.util.metrics import Measure + +# period to cache .well-known results for by default +WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600 + +# jitter to add to the .well-known default cache ttl +WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60 + +# period to cache failure to fetch .well-known for +WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 + +# cap for .well-known cache period +WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600 + +# lower bound for .well-known cache period +WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60 + +logger = logging.getLogger(__name__) + + +@attr.s(slots=True, frozen=True) +class WellKnownLookupResult(object): + delegated_server = attr.ib() + + +class WellKnownResolver(object): + """Handles well-known lookups for matrix servers. + """ + + def __init__(self, reactor, agent, well_known_cache=None): + self._reactor = reactor + self._clock = Clock(reactor) + + if well_known_cache is None: + well_known_cache = TTLCache("well-known") + + self._well_known_cache = well_known_cache + self._well_known_agent = RedirectAgent(agent) + + @defer.inlineCallbacks + def get_well_known(self, server_name): + """Attempt to fetch and parse a .well-known file for the given server + + Args: + server_name (bytes): name of the server, from the requested url + + Returns: + Deferred[WellKnownLookupResult]: The result of the lookup + """ + try: + result = self._well_known_cache[server_name] + except KeyError: + # TODO: should we linearise so that we don't end up doing two .well-known + # requests for the same server in parallel? + with Measure(self._clock, "get_well_known"): + result, cache_period = yield self._do_get_well_known(server_name) + + if cache_period > 0: + self._well_known_cache.set(server_name, result, cache_period) + + return WellKnownLookupResult(delegated_server=result) + + @defer.inlineCallbacks + def _do_get_well_known(self, server_name): + """Actually fetch and parse a .well-known, without checking the cache + + Args: + server_name (bytes): name of the server, from the requested url + + Returns: + Deferred[Tuple[bytes|None|object],int]: + result, cache period, where result is one of: + - the new server name from the .well-known (as a `bytes`) + - None if there was no .well-known file. 
+ - INVALID_WELL_KNOWN if the .well-known was invalid + """ + uri = b"https://%s/.well-known/matrix/server" % (server_name,) + uri_str = uri.decode("ascii") + logger.info("Fetching %s", uri_str) + try: + response = yield make_deferred_yieldable( + self._well_known_agent.request(b"GET", uri) + ) + body = yield make_deferred_yieldable(readBody(response)) + if response.code != 200: + raise Exception("Non-200 response %s" % (response.code,)) + + parsed_body = json.loads(body.decode("utf-8")) + logger.info("Response from .well-known: %s", parsed_body) + if not isinstance(parsed_body, dict): + raise Exception("not a dict") + if "m.server" not in parsed_body: + raise Exception("Missing key 'm.server'") + except Exception as e: + logger.info("Error fetching %s: %s", uri_str, e) + + # add some randomness to the TTL to avoid a stampeding herd every hour + # after startup + cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD + cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + return (None, cache_period) + + result = parsed_body["m.server"].encode("ascii") + + cache_period = _cache_period_from_headers( + response.headers, time_now=self._reactor.seconds + ) + if cache_period is None: + cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD + # add some randomness to the TTL to avoid a stampeding herd every 24 hours + # after startup + cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + else: + cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) + cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) + + return (result, cache_period) + + +def _cache_period_from_headers(headers, time_now=time.time): + cache_controls = _parse_cache_control(headers) + + if b"no-store" in cache_controls: + return 0 + + if b"max-age" in cache_controls: + try: + max_age = int(cache_controls[b"max-age"]) + return max_age + except ValueError: + pass + + expires = headers.getRawHeaders(b"expires") + if expires is not None: + try: + expires_date = stringToDatetime(expires[-1]) + return expires_date - time_now() + except ValueError: + # RFC7234 says 'A cache recipient MUST interpret invalid date formats, + # especially the value "0", as representing a time in the past (i.e., + # "already expired"). 
+ return 0 + + return None + + +def _parse_cache_control(headers): + cache_controls = {} + for hdr in headers.getRawHeaders(b"cache-control", []): + for directive in hdr.split(b","): + splits = [x.strip() for x in directive.split(b"=", 1)] + k = splits[0].lower() + v = splits[1] if len(splits) > 1 else None + cache_controls[k] = v + return cache_controls diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 5e709c0c17..1435baede2 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -25,17 +25,19 @@ from twisted.internet._sslverify import ClientTLSOptions, OpenSSLCertificateOpti from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.web._newclient import ResponseNeverReceived +from twisted.web.client import Agent from twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS from synapse.config.homeserver import HomeServerConfig from synapse.crypto.context_factory import ClientTLSOptionsFactory -from synapse.http.federation.matrix_federation_agent import ( - MatrixFederationAgent, +from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent +from synapse.http.federation.srv_resolver import Server +from synapse.http.federation.well_known_resolver import ( + WellKnownResolver, _cache_period_from_headers, ) -from synapse.http.federation.srv_resolver import Server from synapse.logging.context import LoggingContext from synapse.util.caches.ttlcache import TTLCache @@ -79,9 +81,10 @@ class MatrixFederationAgentTests(TestCase): self._config = config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") + self.tls_factory = ClientTLSOptionsFactory(config) self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(config), + tls_client_options_factory=self.tls_factory, _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, ) @@ -928,20 +931,16 @@ class MatrixFederationAgentTests(TestCase): self.reactor.pump((0.1,)) self.successResultOf(test_d) - @defer.inlineCallbacks - def do_get_well_known(self, serv): - try: - result = yield self.agent._get_well_known(serv) - logger.info("Result from well-known fetch: %s", result) - except Exception as e: - logger.warning("Error fetching well-known: %s", e) - raise - return result - def test_well_known_cache(self): + well_known_resolver = WellKnownResolver( + self.reactor, + Agent(self.reactor, contextFactory=self.tls_factory), + well_known_cache=self.well_known_cache, + ) + self.reactor.lookups["testserv"] = "1.2.3.4" - fetch_d = self.do_get_well_known(b"testserv") + fetch_d = well_known_resolver.get_well_known(b"testserv") # there should be an attempt to connect on port 443 for the .well-known clients = self.reactor.tcpClients @@ -958,21 +957,21 @@ class MatrixFederationAgentTests(TestCase): ) r = self.successResultOf(fetch_d) - self.assertEqual(r, b"target-server") + self.assertEqual(r.delegated_server, b"target-server") # close the tcp connection well_known_server.loseConnection() # repeat the request: it should hit the cache - fetch_d = self.do_get_well_known(b"testserv") + fetch_d = well_known_resolver.get_well_known(b"testserv") r = self.successResultOf(fetch_d) - self.assertEqual(r, b"target-server") + self.assertEqual(r.delegated_server, b"target-server") # expire the cache 
self.reactor.pump((1000.0,)) # now it should connect again - fetch_d = self.do_get_well_known(b"testserv") + fetch_d = well_known_resolver.get_well_known(b"testserv") self.assertEqual(len(clients), 1) (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) @@ -986,7 +985,7 @@ class MatrixFederationAgentTests(TestCase): ) r = self.successResultOf(fetch_d) - self.assertEqual(r, b"other-server") + self.assertEqual(r.delegated_server, b"other-server") class TestCachePeriodFromHeaders(TestCase): -- cgit 1.5.1
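The refactored calling convention is visible in the updated test above; in rough outline (a sketch, assuming a running Twisted reactor; real callers also pass a TLS-aware contextFactory to the Agent, which is omitted here for brevity):

    from twisted.internet import defer, reactor
    from twisted.web.client import Agent

    from synapse.http.federation.well_known_resolver import WellKnownResolver

    @defer.inlineCallbacks
    def lookup_delegated_server(server_name):
        # server_name is bytes, e.g. b"example.com"; the resolver wraps the
        # agent in a RedirectAgent and keeps its own TTL cache unless one is
        # passed in via well_known_cache
        resolver = WellKnownResolver(reactor, Agent(reactor))
        result = yield resolver.get_well_known(server_name)
        # delegated_server is the "m.server" value as bytes, or None if
        # there was no (valid) .well-known file
        return result.delegated_server

Splitting the lookup out of MatrixFederationAgent means the caching and parsing logic can be exercised directly, as the updated test_well_known_cache now does.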