From fd99787162113857119c033355548c5b3769a309 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 27 Sep 2018 14:53:58 -0600 Subject: Incorporate Dave's work for GDPR login flows As per https://github.com/vector-im/riot-web/issues/7168#issuecomment-419996117 --- synapse/api/constants.py | 1 + synapse/handlers/auth.py | 4 ++++ synapse/rest/client/v2_alpha/auth.py | 20 ++++++++++++++++++++ synapse/rest/client/v2_alpha/register.py | 15 +++++++++++++++ 4 files changed, 40 insertions(+) (limited to 'synapse') diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c2630c4c64..b2815da0ab 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -51,6 +51,7 @@ class LoginType(object): EMAIL_IDENTITY = u"m.login.email.identity" MSISDN = u"m.login.msisdn" RECAPTCHA = u"m.login.recaptcha" + TERMS = u"m.login.terms" DUMMY = u"m.login.dummy" # Only for C/S API v1 diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2a5eab124f..f08a2cdd7e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -59,6 +59,7 @@ class AuthHandler(BaseHandler): LoginType.EMAIL_IDENTITY: self._check_email_identity, LoginType.MSISDN: self._check_msisdn, LoginType.DUMMY: self._check_dummy_auth, + LoginType.TERMS: self._check_terms_auth, } self.bcrypt_rounds = hs.config.bcrypt_rounds @@ -431,6 +432,9 @@ class AuthHandler(BaseHandler): def _check_dummy_auth(self, authdict, _): return defer.succeed(True) + def _check_terms_auth(self, authdict, _): + return defer.succeed(True) + @defer.inlineCallbacks def _check_threepid(self, medium, authdict): if 'threepid_creds' not in authdict: diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bd8b5f4afa..bc3bfee4a0 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -130,6 +130,26 @@ class AuthRestServlet(RestServlet): request.setHeader(b"Content-Type", b"text/html; charset=utf-8") request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + request.write(html_bytes) + finish_request(request) + defer.returnValue(None) + elif stagetype == LoginType.TERMS: + session = request.args['session'][0] + authdict = { + 'session': session, + } + success = yield self.auth_handler.add_oob_auth( + LoginType.TERMS, + authdict, + self.hs.get_ip_from_request(request) + ) + + html = "hai" + html_bytes = html.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + request.write(html_bytes) finish_request(request) defer.returnValue(None) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 192f52e462..dedf5269ed 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -359,6 +359,21 @@ class RegisterRestServlet(RestServlet): [LoginType.MSISDN, LoginType.EMAIL_IDENTITY] ]) + if self.hs.config.block_events_without_consent_error is not None: + new_flows = [] + for flow in flows: + # To only allow registration if completing GDPR auth, + # making clients that don't support it use fallback auth. + #flow.append(LoginType.TERMS) + + # or to duplicate all the flows above with the GDPR flow on the + # end so clients that support it can use it but clients that don't + # continue to consent via the DM from server notices bot. 
+                new_flows.extend([
+                    flow + [LoginType.TERMS]
+                ])
+            flows.extend(new_flows)
+
         auth_result, params, session_id = yield self.auth_handler.check_auth(
             flows, body, self.hs.get_ip_from_request(request)
         )
-- cgit 1.4.1


From 149c4f176563bd8c976d9c4601825753f7292b12 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Wed, 3 Oct 2018 15:25:53 -0600
Subject: Supply params for terms auth stage

As per https://github.com/matrix-org/matrix-doc/pull/1692
---
 synapse/handlers/auth.py | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'synapse')

diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index f08a2cdd7e..d6a19b74e9 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -466,6 +466,15 @@ class AuthHandler(BaseHandler):
     def _get_params_recaptcha(self):
         return {"public_key": self.hs.config.recaptcha_public_key}
 
+    def _get_params_terms(self):
+        return {
+            "policies": [{
+                "name": "Privacy Policy",
+                "version": self.hs.config.user_consent_version,
+                "url": "%s/_matrix/consent/public" % (self.hs.config.public_baseurl,),
+            }],
+        }
+
     def _auth_dict_for_flows(self, flows, session):
         public_flows = []
         for f in flows:
-- cgit 1.4.1


From 3099d96dba1c5a24cdd81575f6b8b8e07a9e8c94 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Wed, 3 Oct 2018 15:54:19 -0600
Subject: Flesh out the fallback auth for terms

---
 synapse/rest/client/v2_alpha/auth.py | 74 ++++++++++++++++++++++++++++++++----
 1 file changed, 67 insertions(+), 7 deletions(-)

(limited to 'synapse')

diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index bc3bfee4a0..f86f09adcf 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -68,6 +68,29 @@ function captchaDone() {
 """
 
+TERMS_TEMPLATE = """
+<html>
+<head>
+<title>Authentication</title>
+<meta name='viewport' content='width=device-width, initial-scale=1,
+    user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+</head>
+<body>
+<form id="registrationForm" method="post" action="%(myurl)s">
+    <div>
+        <p>
+            Please click the button below if you agree to the
+            privacy policy of this homeserver.
+        </p>
+        <input type="hidden" name="session" value="%(session)s" />
+        <input type="button" onclick="window.open('%(terms_url)s', '_blank');" value="Privacy Policy"/>
+        <input type="submit" value="Agree" />
+    </div>
+</form>
+</body>
+</html>
+"""
 
 SUCCESS_TEMPLATE = """
 
 
@@ -138,13 +161,16 @@ class AuthRestServlet(RestServlet):
             authdict = {
                 'session': session,
             }
-            success = yield self.auth_handler.add_oob_auth(
-                LoginType.TERMS,
-                authdict,
-                self.hs.get_ip_from_request(request)
-            )
 
-            html = "hai"
+            html = TERMS_TEMPLATE % {
+                'session': session,
+                'terms_url': "%s/_matrix/consent/public" % (
+                    self.hs.config.public_baseurl,
+                ),
+                'myurl': "%s/auth/%s/fallback/web" % (
+                    CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+                ),
+            }
             html_bytes = html.encode("utf8")
             request.setResponseCode(200)
             request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
             request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
@@ -159,7 +185,7 @@ class AuthRestServlet(RestServlet):
     @defer.inlineCallbacks
     def on_POST(self, request, stagetype):
         yield
-        if stagetype == "m.login.recaptcha":
+        if stagetype == LoginType.RECAPTCHA:
             if ('g-recaptcha-response' not in request.args or
                     len(request.args['g-recaptcha-response'])) == 0:
                 raise SynapseError(400, "No captcha response supplied")
@@ -198,6 +224,40 @@ class AuthRestServlet(RestServlet):
 
             request.write(html_bytes)
             finish_request(request)
+            defer.returnValue(None)
+        elif stagetype == LoginType.TERMS:
+            if ('session' not in request.args or
+                    len(request.args['session'])) == 0:
+                raise SynapseError(400, "No session supplied")
+
+            session = request.args['session'][0]
+            authdict = {'session': session}
+
+            success = yield self.auth_handler.add_oob_auth(
+                LoginType.TERMS,
+                authdict,
+                self.hs.get_ip_from_request(request)
+            )
+
+            if success:
+                html = SUCCESS_TEMPLATE
+            else:
+                html = TERMS_TEMPLATE % {
+                    'session': session,
+                    'terms_url': "%s/_matrix/consent/public" % (
+                        self.hs.config.public_baseurl,
+                    ),
+                    'myurl': "%s/auth/%s/fallback/web" % (
+                        CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+                    ),
+                }
+            html_bytes = html.encode("utf8")
+            request.setResponseCode(200)
+            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+            request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+            request.write(html_bytes)
+            finish_request(request)
             defer.returnValue(None)
         else:
             raise SynapseError(404, "Unknown auth stage type")
-- cgit 1.4.1


From dfcad5fad5fbfac0a9182853d1acfe410e7cd888 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Wed, 3 Oct 2018 15:54:32 -0600
Subject: Make the terms flow required

---
 synapse/rest/client/v2_alpha/register.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'synapse')

diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index dedf5269ed..78e63447a7 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -364,14 +364,14 @@ class RegisterRestServlet(RestServlet):
             for flow in flows:
                 # To only allow registration if completing GDPR auth,
                 # making clients that don't support it use fallback auth.
-                #flow.append(LoginType.TERMS)
+                flow.append(LoginType.TERMS)
 
                 # or to duplicate all the flows above with the GDPR flow on the
                 # end so clients that support it can use it but clients that don't
                 # continue to consent via the DM from server notices bot.
- new_flows.extend([ - flow + [LoginType.TERMS] - ]) + #new_flows.extend([ + # flow + [LoginType.TERMS] + #]) flows.extend(new_flows) auth_result, params, session_id = yield self.auth_handler.check_auth( -- cgit 1.4.1 From f9d34a763c90811cd53965825799767569ba0e68 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 15:54:54 -0600 Subject: Auto-consent to the privacy policy if the user registered with terms --- synapse/rest/client/v2_alpha/register.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 78e63447a7..851ce6e9a4 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -460,6 +460,12 @@ class RegisterRestServlet(RestServlet): params.get("bind_msisdn") ) + if auth_result and LoginType.TERMS in auth_result: + logger.info("User %s has consented to the privacy policy" % registered_user_id) + yield self.store.user_set_consent_version( + registered_user_id, self.hs.config.user_consent_version, + ) + defer.returnValue((200, return_dict)) def on_OPTIONS(self, _): -- cgit 1.4.1 From 537d0b7b3632789e40cec13f3120151098f11d75 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 3 Oct 2018 17:50:11 -0600 Subject: Use a flag rather than a new route for the public policy This also means that the template now has optional parameters, which will need to be documented somehow. --- synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 4 ++-- synapse/rest/consent/consent_resource.py | 36 +++++++++++++++++++------------- 3 files changed, 25 insertions(+), 17 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d6a19b74e9..42d1336d6e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -471,7 +471,7 @@ class AuthHandler(BaseHandler): "policies": [{ "name": "Privacy Policy", "version": self.hs.config.user_consent_version, - "url": "%s/_matrix/consent/public" % (self.hs.config.public_baseurl,), + "url": "%s/_matrix/consent?public=true" % (self.hs.config.public_baseurl,), }], } diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index f86f09adcf..77a5ea66f3 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -164,7 +164,7 @@ class AuthRestServlet(RestServlet): html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent/public" % ( + 'terms_url': "%s/_matrix/consent?public=true" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( @@ -244,7 +244,7 @@ class AuthRestServlet(RestServlet): else: html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent/public" % ( + 'terms_url': "%s/_matrix/consent?public=true" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 7362e1858d..7a5786f164 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -30,7 +30,7 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import NotFoundError, StoreError, SynapseError from synapse.config import ConfigError from synapse.http.server import finish_request, wrap_html_request_handler -from synapse.http.servlet import parse_string +from synapse.http.servlet import parse_string, parse_boolean from synapse.types import UserID # 
language to use for the templates. TODO: figure this out from Accept-Language @@ -137,27 +137,35 @@ class ConsentResource(Resource): request (twisted.web.http.Request): """ - version = parse_string(request, "v", - default=self._default_consent_version) - username = parse_string(request, "u", required=True) - userhmac = parse_string(request, "h", required=True, encoding=None) + public_version = parse_boolean(request, "public", default=False) - self._check_hash(username, userhmac) + version = self._default_consent_version + username = None + userhmac = None + has_consented = False + if not public_version: + version = parse_string(request, "v", + default=self._default_consent_version) + username = parse_string(request, "u", required=True) + userhmac = parse_string(request, "h", required=True, encoding=None) - if username.startswith('@'): - qualified_user_id = username - else: - qualified_user_id = UserID(username, self.hs.hostname).to_string() + self._check_hash(username, userhmac) - u = yield self.store.get_user_by_id(qualified_user_id) - if u is None: - raise NotFoundError("Unknown user") + if username.startswith('@'): + qualified_user_id = username + else: + qualified_user_id = UserID(username, self.hs.hostname).to_string() + + u = yield self.store.get_user_by_id(qualified_user_id) + if u is None: + raise NotFoundError("Unknown user") + has_consented = u["consent_version"] == version try: self._render_template( request, "%s.html" % (version,), user=username, userhmac=userhmac, version=version, - has_consented=(u["consent_version"] == version), + has_consented=has_consented, public_version=public_version, ) except TemplateNotFound: raise NotFoundError("Unknown policy version") -- cgit 1.4.1 From 17d585753f1df2b2c2b13ddb8171e174cef97aac Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:18:52 +0100 Subject: Delete unreferened state groups during purge --- synapse/storage/events.py | 33 +++++++++++++++++++++++++------ synapse/storage/state.py | 50 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e7487311ce..0fb190530a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2025,6 +2025,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] finding state groups which depend on redundant" " state groups") remaining_state_groups = [] + unreferenced_state_groups = 0 for i in range(0, len(state_rows), 100): chunk = [sg for sg, in state_rows[i:i + 100]] # look for state groups whose prev_state_group is one we are about @@ -2037,13 +2038,33 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore retcols=["state_group"], keyvalues={}, ) - remaining_state_groups.extend( - row["state_group"] for row in rows - # exclude state groups we are about to delete: no point in - # updating them - if row["state_group"] not in state_groups_to_delete - ) + for row in rows: + sg = row["state_group"] + + if sg in state_groups_to_delete: + # exclude state groups we are about to delete: no point in + # updating them + continue + + if not self._is_state_group_referenced(txn, sg): + # Let's also delete unreferenced state groups while we're + # here, since otherwise we'd need to de-delta them + state_groups_to_delete.add(sg) + unreferenced_state_groups += 1 + continue + + remaining_state_groups.append(sg) + + logger.info( + "[purge] found %i extra unreferenced 
state groups to delete", + unreferenced_state_groups, + ) + + logger.info( + "[purge] de-delta-ing %i remaining state groups", + len(remaining_state_groups), + ) # Now we turn the state groups that reference to-be-deleted state # groups to non delta versions. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f4cbd61c4..b88c7dc091 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1041,6 +1041,56 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count + def _is_state_group_referenced(self, txn, state_group): + """Checks if a given state group is referenced, or is safe to delete. + + A state groups is referenced if it or any of its descendants are + pointed at by an event. (A descendant is a group which has the given + state_group as a prev group) + """ + + # We check this by doing a depth first search to look for any + # descendant referenced by `event_to_state_groups`. + + # State groups we need to check, contains state groups that are + # descendants of `state_group` + state_groups_to_search = [state_group] + + # Set of state groups we've already checked + state_groups_searched = set() + + while state_groups_to_search: + state_group = state_groups_to_search.pop() # Next state group to check + + is_referenced = self._simple_select_one_onecol_txn( + txn, + table="event_to_state_groups", + keyvalues={"state_group": state_group}, + retcol="event_id", + allow_none=True, + ) + if is_referenced: + # A descendant is referenced by event_to_state_groups, so + # original state group is referenced. + return True + + state_groups_searched.add(state_group) + + # Find all children of current state group and add to search + references = self._simple_select_onecol_txn( + txn, + table="state_group_edges", + keyvalues={"prev_state_group": state_group}, + retcol="state_group", + ) + state_groups_to_search.extend(references) + + # Lets be paranoid and check for cycles + if state_groups_searched.intersection(references): + raise Exception("State group %s has cyclic dependency", state_group) + + return False + class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): """ Keeps track of the state at a given event. -- cgit 1.4.1 From 4917ff55234718c0e650c6dc2a1117304465b9be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:24:01 +0100 Subject: Add state_group index to event_to_state_groups This is needed to efficiently check for unreferenced state groups during purge. --- synapse/storage/prepare_database.py | 2 +- .../delta/52/add_event_to_state_group_index.sql | 19 +++++++++++++++++++ synapse/storage/state.py | 7 +++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/52/add_event_to_state_group_index.sql (limited to 'synapse') diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index b364719312..bd740e1e45 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 51 +SCHEMA_VERSION = 52 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql new file mode 100644 index 0000000000..91e03d13e1 --- /dev/null +++ b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql @@ -0,0 +1,19 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This is needed to efficiently check for unreferenced state groups during +-- purge. Added events_to_state_group(state_group) index +INSERT into background_updates (update_name, progress_json) + VALUES ('event_to_state_groups_sg_index', '{}'); diff --git a/synapse/storage/state.py b/synapse/storage/state.py index b88c7dc091..3f08f447a3 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1114,6 +1114,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" + EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" def __init__(self, db_conn, hs): super(StateStore, self).__init__(db_conn, hs) @@ -1132,6 +1133,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): columns=["state_key"], where_clause="type='m.room.member'", ) + self.register_background_index_update( + self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME, + index_name="event_to_state_groups_sg_index", + table="event_to_state_groups", + columns=["state_group"], + ) def _store_event_state_mappings_txn(self, txn, events_and_contexts): state_groups = {} -- cgit 1.4.1 From 67a1e315cc7f8b270ec87e0ab6e58aa2adaee32d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Oct 2018 13:49:48 +0100 Subject: Fix up comments --- synapse/storage/state.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f08f447a3..f7cf5c86c9 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1044,9 +1044,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def _is_state_group_referenced(self, txn, state_group): """Checks if a given state group is referenced, or is safe to delete. - A state groups is referenced if it or any of its descendants are - pointed at by an event. (A descendant is a group which has the given - state_group as a prev group) + A state group is referenced if it or any of its descendants are + pointed at by an event. (A descendant is a state_group whose chain of + prev_groups includes the given state_group.) 
""" # We check this by doing a depth first search to look for any -- cgit 1.4.1 From 5119818e9d7dac97854868af102476df57f599e5 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 17:54:28 -0600 Subject: Rely on the lack of ?u to represent public access also general cleanup --- synapse/rest/client/v2_alpha/auth.py | 4 ++-- synapse/rest/consent/consent_resource.py | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 77a5ea66f3..ec583ad16a 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -164,7 +164,7 @@ class AuthRestServlet(RestServlet): html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent?public=true" % ( + 'terms_url': "%s/_matrix/consent" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( @@ -244,7 +244,7 @@ class AuthRestServlet(RestServlet): else: html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent?public=true" % ( + 'terms_url': "%s/_matrix/consent" % ( self.hs.config.public_baseurl, ), 'myurl': "%s/auth/%s/fallback/web" % ( diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 7a5786f164..4cadd71d7e 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -30,7 +30,7 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import NotFoundError, StoreError, SynapseError from synapse.config import ConfigError from synapse.http.server import finish_request, wrap_html_request_handler -from synapse.http.servlet import parse_string, parse_boolean +from synapse.http.servlet import parse_string from synapse.types import UserID # language to use for the templates. 
TODO: figure this out from Accept-Language @@ -137,16 +137,12 @@ class ConsentResource(Resource): request (twisted.web.http.Request): """ - public_version = parse_boolean(request, "public", default=False) - - version = self._default_consent_version - username = None + version = parse_string(request, "v", default=self._default_consent_version) + username = parse_string(request, "u", required=False, default="") userhmac = None has_consented = False + public_version = username != "" if not public_version: - version = parse_string(request, "v", - default=self._default_consent_version) - username = parse_string(request, "u", required=True) userhmac = parse_string(request, "h", required=True, encoding=None) self._check_hash(username, userhmac) -- cgit 1.4.1 From dd99db846d76d511fc7bbea80897b9101782ec1f Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 12 Oct 2018 18:03:27 -0600 Subject: Update login terms structure for the proposed language support --- synapse/handlers/auth.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 42d1336d6e..9038fee264 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -468,11 +468,14 @@ class AuthHandler(BaseHandler): def _get_params_terms(self): return { - "policies": [{ - "name": "Privacy Policy", + "policies": { + "privacy_policy": { "version": self.hs.config.user_consent_version, - "url": "%s/_matrix/consent?public=true" % (self.hs.config.public_baseurl,), - }], + "en": { + "name": "Privacy Policy", + "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + }, + }, } def _auth_dict_for_flows(self, flows, session): -- cgit 1.4.1 From 762a0982aab04ebec1e7a00bc03d26aefa8461c4 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 15 Oct 2018 14:46:09 -0600 Subject: Python is hard --- synapse/handlers/auth.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 9038fee264..f1befeb575 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -469,11 +469,12 @@ class AuthHandler(BaseHandler): def _get_params_terms(self): return { "policies": { - "privacy_policy": { - "version": self.hs.config.user_consent_version, - "en": { - "name": "Privacy Policy", - "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + "privacy_policy": { + "version": self.hs.config.user_consent_version, + "en": { + "name": "Privacy Policy", + "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + }, }, }, } -- cgit 1.4.1 From 442734ff9e7a4ac09c54a58f8b5467379673914f Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 15 Oct 2018 14:56:13 -0600 Subject: Ensure the terms params are actually provided --- synapse/handlers/auth.py | 1 + 1 file changed, 1 insertion(+) (limited to 'synapse') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f1befeb575..12979f6ed3 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -486,6 +486,7 @@ class AuthHandler(BaseHandler): get_params = { LoginType.RECAPTCHA: self._get_params_recaptcha, + LoginType.TERMS: self._get_params_terms, } params = {} -- cgit 1.4.1 From a8ed93a4b55a19a478c9aba929bfea07e691abbf Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 15 Oct 2018 16:10:29 -0600 Subject: pep8 --- synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/auth.py | 3 --- synapse/rest/client/v2_alpha/register.py | 12 
++---------- 3 files changed, 3 insertions(+), 14 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 12979f6ed3..bef796fd0c 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -469,7 +469,7 @@ class AuthHandler(BaseHandler): def _get_params_terms(self): return { "policies": { - "privacy_policy": { + "privacy_policy": { "version": self.hs.config.user_consent_version, "en": { "name": "Privacy Policy", diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index ec583ad16a..0b2933fe8e 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -158,9 +158,6 @@ class AuthRestServlet(RestServlet): defer.returnValue(None) elif stagetype == LoginType.TERMS: session = request.args['session'][0] - authdict = { - 'session': session, - } html = TERMS_TEMPLATE % { 'session': session, diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 851ce6e9a4..c5214330ad 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -359,19 +359,11 @@ class RegisterRestServlet(RestServlet): [LoginType.MSISDN, LoginType.EMAIL_IDENTITY] ]) + # Append m.login.terms to all flows if we're requiring consent if self.hs.config.block_events_without_consent_error is not None: new_flows = [] for flow in flows: - # To only allow registration if completing GDPR auth, - # making clients that don't support it use fallback auth. flow.append(LoginType.TERMS) - - # or to duplicate all the flows above with the GDPR flow on the - # end so clients that support it can use it but clients that don't - # continue to consent via the DM from server notices bot. - #new_flows.extend([ - # flow + [LoginType.TERMS] - #]) flows.extend(new_flows) auth_result, params, session_id = yield self.auth_handler.check_auth( @@ -461,7 +453,7 @@ class RegisterRestServlet(RestServlet): ) if auth_result and LoginType.TERMS in auth_result: - logger.info("User %s has consented to the privacy policy" % registered_user_id) + logger.info("%s has consented to the privacy policy" % registered_user_id) yield self.store.user_set_consent_version( registered_user_id, self.hs.config.user_consent_version, ) -- cgit 1.4.1 From 47a9da28caa6a4f27d2df31f043971a2c9c7b555 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Oct 2018 20:43:18 +0100 Subject: Batch process handling state groups --- synapse/storage/events.py | 81 +++++++++------------------------ synapse/storage/state.py | 112 +++++++++++++++++++++++++++++----------------- 2 files changed, 92 insertions(+), 101 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 0fb190530a..e4d0f8b1a9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -37,6 +37,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore +from synapse.storage.state import StateGroupWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks @@ -203,7 +204,8 @@ def _retry_on_integrity_error(func): # inherits from EventFederationStore so that we can call 
_update_backward_extremities # and _handle_mult_prev_events (though arguably those could both be moved in here) -class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore): +class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore, + BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" @@ -1995,70 +1997,29 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] finding redundant state groups") - # Get all state groups that are only referenced by events that are - # to be deleted. - # This works by first getting state groups that we may want to delete, - # joining against event_to_state_groups to get events that use that - # state group, then left joining against events_to_purge again. Any - # state group where the left join produce *no nulls* are referenced - # only by events that are going to be purged. + # Get all state groups that are referenced by events that are to be + # deleted. We then go and check if they are referenced by other events + # or state groups, and if not we delete them. txn.execute(""" - SELECT state_group FROM - ( - SELECT DISTINCT state_group FROM events_to_purge - INNER JOIN event_to_state_groups USING (event_id) - ) AS sp - INNER JOIN event_to_state_groups USING (state_group) - LEFT JOIN events_to_purge AS ep USING (event_id) - GROUP BY state_group - HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0 + SELECT DISTINCT state_group FROM events_to_purge + INNER JOIN event_to_state_groups USING (event_id) """) - state_rows = txn.fetchall() - logger.info("[purge] found %i redundant state groups", len(state_rows)) - - # make a set of the redundant state groups, so that we can look them up - # efficiently - state_groups_to_delete = set([sg for sg, in state_rows]) - - # Now we get all the state groups that rely on these state groups - logger.info("[purge] finding state groups which depend on redundant" - " state groups") - remaining_state_groups = [] - unreferenced_state_groups = 0 - for i in range(0, len(state_rows), 100): - chunk = [sg for sg, in state_rows[i:i + 100]] - # look for state groups whose prev_state_group is one we are about - # to delete - rows = self._simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=chunk, - retcols=["state_group"], - keyvalues={}, - ) - - for row in rows: - sg = row["state_group"] - - if sg in state_groups_to_delete: - # exclude state groups we are about to delete: no point in - # updating them - continue + referenced_state_groups = set(sg for sg, in txn) + logger.info( + "[purge] found %i referenced state groups", + len(referenced_state_groups), + ) - if not self._is_state_group_referenced(txn, sg): - # Let's also delete unreferenced state groups while we're - # here, since otherwise we'd need to de-delta them - state_groups_to_delete.add(sg) - unreferenced_state_groups += 1 - continue + logger.info("[purge] finding state groups that can be deleted") - remaining_state_groups.append(sg) + state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups( + txn, referenced_state_groups, + ) logger.info( - "[purge] found %i extra unreferenced state groups to delete", - unreferenced_state_groups, + "[purge] found %i state groups to delete", + len(state_groups_to_delete), ) logger.info( @@ -2109,11 +2070,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, 
BackgroundUpdateStore logger.info("[purge] removing redundant state groups") txn.executemany( "DELETE FROM state_groups_state WHERE state_group = ?", - state_rows + ((sg,) for sg in state_groups_to_delete), ) txn.executemany( "DELETE FROM state_groups WHERE id = ?", - state_rows + ((sg,) for sg in state_groups_to_delete), ) logger.info("[purge] removing events from event_to_state_groups") diff --git a/synapse/storage/state.py b/synapse/storage/state.py index f7cf5c86c9..0f86311ed4 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1041,55 +1041,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count - def _is_state_group_referenced(self, txn, state_group): - """Checks if a given state group is referenced, or is safe to delete. + def _find_unreferenced_groups(self, txn, state_groups): + """Used when purging history to figure out which state groups can be + deleted and which need to be de-delta'ed (due to one of its prev groups + being scheduled for deletion). - A state group is referenced if it or any of its descendants are - pointed at by an event. (A descendant is a state_group whose chain of - prev_groups includes the given state_group.) - """ - - # We check this by doing a depth first search to look for any - # descendant referenced by `event_to_state_groups`. - - # State groups we need to check, contains state groups that are - # descendants of `state_group` - state_groups_to_search = [state_group] - - # Set of state groups we've already checked - state_groups_searched = set() - - while state_groups_to_search: - state_group = state_groups_to_search.pop() # Next state group to check + Args: + txn + state_groups (set[int]): Set of state groups referenced by events + that are going to be deleted. - is_referenced = self._simple_select_one_onecol_txn( + Returns: + tuple[set[int], set[int]]: The set of state groups that can be + deleted and the set of state groups that need to be de-delta'ed + """ + # Graph of state group -> previous group + graph = {} + + # Set of events that we have found to be referenced by events + referenced_groups = set() + + # Set of state groups we've already seen + state_groups_seen = set(state_groups) + + # Set of state groups to handle next. + next_to_search = set(state_groups) + while next_to_search: + # We bound size of groups we're looking up at once, to stop the + # SQL query getting too big + if len(next_to_search) < 100: + current_search = next_to_search + next_to_search = set() + else: + lst = list(next_to_search) + current_search = set(lst[:100]) + next_to_search = set(lst[100:]) + + # Check if state groups are referenced + sql = """ + SELECT state_group, count(*) FROM event_to_state_groups + LEFT JOIN events_to_purge AS ep USING (event_id) + WHERE state_group IN (%s) AND ep.event_id IS NULL + GROUP BY state_group + """ % (",".join("?" for _ in current_search),) + txn.execute(sql, list(current_search)) + + referenced = set(sg for sg, cnt in txn if cnt > 0) + referenced_groups |= referenced + + # We don't continue iterating up the state group graphs for state + # groups that are referenced. 
+ current_search -= referenced + + rows = self._simple_select_many_txn( txn, - table="event_to_state_groups", - keyvalues={"state_group": state_group}, - retcol="event_id", - allow_none=True, + table="state_group_edges", + column="prev_state_group", + iterable=current_search, + keyvalues={}, + retcols=("prev_state_group", "state_group",), ) - if is_referenced: - # A descendant is referenced by event_to_state_groups, so - # original state group is referenced. - return True - state_groups_searched.add(state_group) + next_to_search.update(row["state_group"] for row in rows) + # We don't bother re-handling groups we've already seen + next_to_search -= state_groups_seen + state_groups_seen |= next_to_search - # Find all children of current state group and add to search - references = self._simple_select_onecol_txn( - txn, - table="state_group_edges", - keyvalues={"prev_state_group": state_group}, - retcol="state_group", - ) - state_groups_to_search.extend(references) + for row in rows: + # Note: Each state group can have at most one prev group + graph[row["state_group"]] = row["prev_state_group"] + + to_delete = state_groups_seen - referenced_groups - # Lets be paranoid and check for cycles - if state_groups_searched.intersection(references): - raise Exception("State group %s has cyclic dependency", state_group) + to_dedelta = set() + for sg in referenced_groups: + prev_sg = graph.get(sg) + if prev_sg and prev_sg in to_delete: + to_dedelta.add(sg) - return False + return to_delete, to_dedelta class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): -- cgit 1.4.1 From 67f7b9cb50c246226b94027e3c1d4b958ff9f840 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 16:06:59 +0100 Subject: pep8 --- synapse/storage/events.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index af822fb69d..379b5c514f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2041,7 +2041,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore INNER JOIN event_to_state_groups USING (event_id) """) - referenced_state_groups = set(sg for sg, in txn) + referenced_state_groups = set(sg for sg, in txn) logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups), -- cgit 1.4.1 From 2b791865c454f3bd08aa16431ef506305f712609 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 29 Oct 2018 21:52:52 +1100 Subject: version bump --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 1ddbbbebfb..c8377df7b2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.7" +__version__ = "0.33.8rc1" -- cgit 1.4.1 From b2399f6281d7cd11e7762b683bdd5a4f0c24927e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:01:11 +0000 Subject: Make SQL a bit cleaner --- synapse/storage/state.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 59a50a5df9..45afd42b3f 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1272,14 +1272,13 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Check if state groups are referenced sql = """ - SELECT state_group, count(*) FROM event_to_state_groups + SELECT DISTINCT state_group FROM 
event_to_state_groups LEFT JOIN events_to_purge AS ep USING (event_id) WHERE state_group IN (%s) AND ep.event_id IS NULL - GROUP BY state_group """ % (",".join("?" for _ in current_search),) txn.execute(sql, list(current_search)) - referenced = set(sg for sg, cnt in txn if cnt > 0) + referenced = set(sg for sg, in txn) referenced_groups |= referenced # We don't continue iterating up the state group graphs for state -- cgit 1.4.1 From f4f223aa4455bea3aa642c23ae957932b1168ba3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:01:49 +0000 Subject: Don't make temporary list --- synapse/storage/state.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 45afd42b3f..947d3fc177 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1266,9 +1266,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): current_search = next_to_search next_to_search = set() else: - lst = list(next_to_search) - current_search = set(lst[:100]) - next_to_search = set(lst[100:]) + current_search = set(islice(next_to_search, 100)) + next_to_search -= current_search # Check if state groups are referenced sql = """ -- cgit 1.4.1 From 664b192a3b4aa57597d9832361b025bc078ea87a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:21:43 +0000 Subject: Fix set operations thinko --- synapse/storage/state.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 947d3fc177..dfec57c045 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1293,10 +1293,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): retcols=("prev_state_group", "state_group",), ) - next_to_search.update(row["state_group"] for row in rows) + prevs = set(row["state_group"] for row in rows) # We don't bother re-handling groups we've already seen - next_to_search -= state_groups_seen - state_groups_seen |= next_to_search + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs for row in rows: # Note: Each state group can have at most one prev group -- cgit 1.4.1 From ad88460e0d2e0a7c2cf39ec5539d5c4ff030bbbd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:23:34 +0000 Subject: Move _find_unreferenced_groups --- synapse/storage/events.py | 85 +++++++++++++++++++++++++++++++++++++++++++++-- synapse/storage/state.py | 79 ------------------------------------------- 2 files changed, 83 insertions(+), 81 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e791922733..919e855f3b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2052,8 +2052,10 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore logger.info("[purge] finding state groups that can be deleted") - state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups( - txn, referenced_state_groups, + state_groups_to_delete, remaining_state_groups = ( + self._find_unreferenced_groups_during_purge( + txn, referenced_state_groups, + ) ) logger.info( @@ -2209,6 +2211,85 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore logger.info("[purge] done") + def _find_unreferenced_groups_during_purge(self, txn, state_groups): + """Used when purging history to figure out which state groups can be + deleted and which need to be 
de-delta'ed (due to one of its prev groups + being scheduled for deletion). + + Args: + txn + state_groups (set[int]): Set of state groups referenced by events + that are going to be deleted. + + Returns: + tuple[set[int], set[int]]: The set of state groups that can be + deleted and the set of state groups that need to be de-delta'ed + """ + # Graph of state group -> previous group + graph = {} + + # Set of events that we have found to be referenced by events + referenced_groups = set() + + # Set of state groups we've already seen + state_groups_seen = set(state_groups) + + # Set of state groups to handle next. + next_to_search = set(state_groups) + while next_to_search: + # We bound size of groups we're looking up at once, to stop the + # SQL query getting too big + if len(next_to_search) < 100: + current_search = next_to_search + next_to_search = set() + else: + current_search = set(itertools.islice(next_to_search, 100)) + next_to_search -= current_search + + # Check if state groups are referenced + sql = """ + SELECT DISTINCT state_group FROM event_to_state_groups + LEFT JOIN events_to_purge AS ep USING (event_id) + WHERE state_group IN (%s) AND ep.event_id IS NULL + """ % (",".join("?" for _ in current_search),) + txn.execute(sql, list(current_search)) + + referenced = set(sg for sg, in txn) + referenced_groups |= referenced + + # We don't continue iterating up the state group graphs for state + # groups that are referenced. + current_search -= referenced + + rows = self._simple_select_many_txn( + txn, + table="state_group_edges", + column="prev_state_group", + iterable=current_search, + keyvalues={}, + retcols=("prev_state_group", "state_group",), + ) + + prevs = set(row["state_group"] for row in rows) + # We don't bother re-handling groups we've already seen + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs + + for row in rows: + # Note: Each state group can have at most one prev group + graph[row["state_group"]] = row["prev_state_group"] + + to_delete = state_groups_seen - referenced_groups + + to_dedelta = set() + for sg in referenced_groups: + prev_sg = graph.get(sg) + if prev_sg and prev_sg in to_delete: + to_dedelta.add(sg) + + return to_delete, to_dedelta + @defer.inlineCallbacks def is_event_after(self, event_id1, event_id2): """Returns True if event_id1 is after event_id2 in the stream diff --git a/synapse/storage/state.py b/synapse/storage/state.py index dfec57c045..d737bd6778 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1234,85 +1234,6 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count - def _find_unreferenced_groups(self, txn, state_groups): - """Used when purging history to figure out which state groups can be - deleted and which need to be de-delta'ed (due to one of its prev groups - being scheduled for deletion). - - Args: - txn - state_groups (set[int]): Set of state groups referenced by events - that are going to be deleted. - - Returns: - tuple[set[int], set[int]]: The set of state groups that can be - deleted and the set of state groups that need to be de-delta'ed - """ - # Graph of state group -> previous group - graph = {} - - # Set of events that we have found to be referenced by events - referenced_groups = set() - - # Set of state groups we've already seen - state_groups_seen = set(state_groups) - - # Set of state groups to handle next. 
- next_to_search = set(state_groups) - while next_to_search: - # We bound size of groups we're looking up at once, to stop the - # SQL query getting too big - if len(next_to_search) < 100: - current_search = next_to_search - next_to_search = set() - else: - current_search = set(islice(next_to_search, 100)) - next_to_search -= current_search - - # Check if state groups are referenced - sql = """ - SELECT DISTINCT state_group FROM event_to_state_groups - LEFT JOIN events_to_purge AS ep USING (event_id) - WHERE state_group IN (%s) AND ep.event_id IS NULL - """ % (",".join("?" for _ in current_search),) - txn.execute(sql, list(current_search)) - - referenced = set(sg for sg, in txn) - referenced_groups |= referenced - - # We don't continue iterating up the state group graphs for state - # groups that are referenced. - current_search -= referenced - - rows = self._simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=current_search, - keyvalues={}, - retcols=("prev_state_group", "state_group",), - ) - - prevs = set(row["state_group"] for row in rows) - # We don't bother re-handling groups we've already seen - prevs -= state_groups_seen - next_to_search |= prevs - state_groups_seen |= prevs - - for row in rows: - # Note: Each state group can have at most one prev group - graph[row["state_group"]] = row["prev_state_group"] - - to_delete = state_groups_seen - referenced_groups - - to_dedelta = set() - for sg in referenced_groups: - prev_sg = graph.get(sg) - if prev_sg and prev_sg in to_delete: - to_dedelta.add(sg) - - return to_delete, to_dedelta - class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): """ Keeps track of the state at a given event. -- cgit 1.4.1 From 0dce9e1379ea867c9a00c8e6cf1d42badb52601d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Oct 2018 23:55:43 +1100 Subject: Write some tests for the email pusher (#4095) --- .travis.yml | 11 ++-- changelog.d/4095.bugfix | 1 + synapse/push/emailpusher.py | 5 +- synapse/push/mailer.py | 10 +-- synapse/server.py | 5 ++ tests/push/__init__.py | 0 tests/push/test_email.py | 148 ++++++++++++++++++++++++++++++++++++++++++++ tests/server.py | 4 +- tests/test_mau.py | 2 +- tests/unittest.py | 9 ++- 10 files changed, 182 insertions(+), 13 deletions(-) create mode 100644 changelog.d/4095.bugfix create mode 100644 tests/push/__init__.py create mode 100644 tests/push/test_email.py (limited to 'synapse') diff --git a/.travis.yml b/.travis.yml index fd41841c77..655fab9d8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,6 +23,9 @@ branches: - develop - /^release-v/ +# When running the tox environments that call Twisted Trial, we can pass the -j +# flag to run the tests concurrently. We set this to 2 for CPU bound tests +# (SQLite) and 4 for I/O bound tests (PostgreSQL). 
matrix: fast_finish: true include: @@ -33,10 +36,10 @@ matrix: env: TOX_ENV="pep8,check_isort" - python: 2.7 - env: TOX_ENV=py27 + env: TOX_ENV=py27 TRIAL_FLAGS="-j 2" - python: 2.7 - env: TOX_ENV=py27-old + env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2" - python: 2.7 env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" @@ -44,10 +47,10 @@ matrix: - postgresql - python: 3.5 - env: TOX_ENV=py35 + env: TOX_ENV=py35 TRIAL_FLAGS="-j 2" - python: 3.6 - env: TOX_ENV=py36 + env: TOX_ENV=py36 TRIAL_FLAGS="-j 2" - python: 3.6 env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4" diff --git a/changelog.d/4095.bugfix b/changelog.d/4095.bugfix new file mode 100644 index 0000000000..76ee7148c2 --- /dev/null +++ b/changelog.d/4095.bugfix @@ -0,0 +1 @@ +Fix exceptions when using the email mailer on Python 3. diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index f369124258..50e1007d84 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -85,7 +85,10 @@ class EmailPusher(object): self.timed_call = None def on_new_notifications(self, min_stream_ordering, max_stream_ordering): - self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + if self.max_stream_ordering: + self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + else: + self.max_stream_ordering = max_stream_ordering self._start_processing() def on_new_receipts(self, min_stream_id, max_stream_id): diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 16fb5e8471..ebcb93bfc7 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -26,7 +26,6 @@ import bleach import jinja2 from twisted.internet import defer -from twisted.mail.smtp import sendmail from synapse.api.constants import EventTypes from synapse.api.errors import StoreError @@ -85,6 +84,7 @@ class Mailer(object): self.notif_template_html = notif_template_html self.notif_template_text = notif_template_text + self.sendmail = self.hs.get_sendmail() self.store = self.hs.get_datastore() self.macaroon_gen = self.hs.get_macaroon_generator() self.state_handler = self.hs.get_state_handler() @@ -191,11 +191,11 @@ class Mailer(object): multipart_msg.attach(html_part) logger.info("Sending email push notification to %s" % email_address) - # logger.debug(html_text) - yield sendmail( + yield self.sendmail( self.hs.config.email_smtp_host, - raw_from, raw_to, multipart_msg.as_string(), + raw_from, raw_to, multipart_msg.as_string().encode('utf8'), + reactor=self.hs.get_reactor(), port=self.hs.config.email_smtp_port, requireAuthentication=self.hs.config.email_smtp_user is not None, username=self.hs.config.email_smtp_user, @@ -333,7 +333,7 @@ class Mailer(object): notif_events, user_id, reason): if len(notifs_by_room) == 1: # Only one room has new stuff - room_id = notifs_by_room.keys()[0] + room_id = list(notifs_by_room.keys())[0] # If the room has some kind of name, use it, but we don't # want the generated-from-names one here otherwise we'll diff --git a/synapse/server.py b/synapse/server.py index cf6b872cbd..9985687b95 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -23,6 +23,7 @@ import abc import logging from twisted.enterprise import adbapi +from twisted.mail.smtp import sendmail from twisted.web.client import BrowserLikePolicyForHTTPS from synapse.api.auth import Auth @@ -174,6 +175,7 @@ class HomeServer(object): 'message_handler', 'pagination_handler', 'room_context_handler', + 'sendmail', ] # This is overridden in derived application classes @@ -269,6 +271,9 @@ class HomeServer(object): def 
build_room_creation_handler(self): return RoomCreationHandler(self) + def build_sendmail(self): + return sendmail + def build_state_handler(self): return StateHandler(self) diff --git a/tests/push/__init__.py b/tests/push/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/push/test_email.py b/tests/push/test_email.py new file mode 100644 index 0000000000..50ee6910d1 --- /dev/null +++ b/tests/push/test_email.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pkg_resources + +from twisted.internet.defer import Deferred + +from synapse.rest.client.v1 import admin, login, room + +from tests.unittest import HomeserverTestCase + +try: + from synapse.push.mailer import load_jinja2_templates +except Exception: + load_jinja2_templates = None + + +class EmailPusherTests(HomeserverTestCase): + + skip = "No Jinja installed" if not load_jinja2_templates else None + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def make_homeserver(self, reactor, clock): + + # List[Tuple[Deferred, args, kwargs]] + self.email_attempts = [] + + def sendmail(*args, **kwargs): + d = Deferred() + self.email_attempts.append((d, args, kwargs)) + return d + + config = self.default_config() + config.email_enable_notifs = True + config.start_pushers = True + + config.email_template_dir = os.path.abspath( + pkg_resources.resource_filename('synapse', 'res/templates') + ) + config.email_notif_template_html = "notif_mail.html" + config.email_notif_template_text = "notif_mail.txt" + config.email_smtp_host = "127.0.0.1" + config.email_smtp_port = 20 + config.require_transport_security = False + config.email_smtp_user = None + config.email_app_name = "Matrix" + config.email_notif_from = "test@example.com" + + hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + + return hs + + def test_sends_email(self): + + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastore().get_user_by_access_token(access_token) + ) + token_id = user_tuple["token_id"] + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="email", + app_id="m.email", + app_display_name="Email Notifications", + device_display_name="a@example.com", + pushkey="a@example.com", + lang=None, + data={}, + ) + ) + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Invite the other person + self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) + + # The other user joins + self.helper.join(room=room, user=other_user_id, 
tok=other_access_token) + + # The other user sends some messages + self.helper.send(room, body="Hi!", tok=other_access_token) + self.helper.send(room, body="There!", tok=other_access_token) + + # Get the stream ordering before it gets sent + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + last_stream_ordering = pushers[0]["last_stream_ordering"] + + # Advance time a bit, so the pusher will register something has happened + self.pump(100) + + # It hasn't succeeded yet, so the stream ordering shouldn't have moved + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"]) + + # One email was attempted to be sent + self.assertEqual(len(self.email_attempts), 1) + + # Make the email succeed + self.email_attempts[0][0].callback(True) + self.pump() + + # One email was attempted to be sent + self.assertEqual(len(self.email_attempts), 1) + + # The stream ordering has increased + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering) diff --git a/tests/server.py b/tests/server.py index 7bee58dff1..819c854448 100644 --- a/tests/server.py +++ b/tests/server.py @@ -125,7 +125,9 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe req.content = BytesIO(content) if access_token: - req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token) + req.requestHeaders.addRawHeader( + b"Authorization", b"Bearer " + access_token.encode('ascii') + ) if content: req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") diff --git a/tests/test_mau.py b/tests/test_mau.py index bdbacb8448..5d387851c5 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -207,7 +207,7 @@ class TestMauLimit(unittest.TestCase): def do_sync_for_user(self, token): request, channel = make_request( - "GET", "/sync", access_token=token.encode('ascii') + "GET", "/sync", access_token=token ) render(request, self.resource, self.reactor) diff --git a/tests/unittest.py b/tests/unittest.py index a59291cc60..4d40bdb6a5 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -146,6 +146,13 @@ def DEBUG(target): return target +def INFO(target): + """A decorator to set the .loglevel attribute to logging.INFO. + Can apply to either a TestCase or an individual test method.""" + target.loglevel = logging.INFO + return target + + class HomeserverTestCase(TestCase): """ A base TestCase that reduces boilerplate for HomeServer-using test cases. 
@@ -373,5 +380,5 @@ class HomeserverTestCase(TestCase): self.render(request) self.assertEqual(channel.code, 200) - access_token = channel.json_body["access_token"].encode('ascii') + access_token = channel.json_body["access_token"] return access_token -- cgit 1.4.1 From 2e223a8c22ef7f65aa42fd149f178a842b60e3c7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 04:24:59 +1100 Subject: Remove the unused /pull federation API (#4118) --- changelog.d/4118.removal | 1 + synapse/federation/federation_server.py | 5 ----- synapse/federation/transport/server.py | 9 --------- 3 files changed, 1 insertion(+), 14 deletions(-) create mode 100644 changelog.d/4118.removal (limited to 'synapse') diff --git a/changelog.d/4118.removal b/changelog.d/4118.removal new file mode 100644 index 0000000000..6fb1d67b47 --- /dev/null +++ b/changelog.d/4118.removal @@ -0,0 +1 @@ +The obsolete and non-functional /pull federation endpoint has been removed. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 0f9302a6a8..fa2cc550e2 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -323,11 +323,6 @@ class FederationServer(FederationBase): else: defer.returnValue((404, "")) - @defer.inlineCallbacks - @log_function - def on_pull_request(self, origin, versions): - raise NotImplementedError("Pull transactions not implemented") - @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 7288d49074..3553f418f1 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet): defer.returnValue((code, response)) -class FederationPullServlet(BaseFederationServlet): - PATH = "/pull/" - - # This is for when someone asks us for everything since version X - def on_GET(self, origin, content, query): - return self.handler.on_pull_request(query["origin"][0], query["v"]) - - class FederationEventServlet(BaseFederationServlet): PATH = "/event/(?P[^/]*)/" @@ -1261,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, - FederationPullServlet, FederationEventServlet, FederationStateServlet, FederationStateIdsServlet, -- cgit 1.4.1 From 3bade14ec0aa7e56c84d30241bd86a177f0699d6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 04:33:41 +1100 Subject: Fix search 500ing (#4122) --- changelog.d/4122.bugfix | 1 + synapse/handlers/search.py | 8 ++- tests/rest/client/v1/test_rooms.py | 106 ++++++++++++++++++++++++++++++++++++- 3 files changed, 112 insertions(+), 3 deletions(-) create mode 100644 changelog.d/4122.bugfix (limited to 'synapse') diff --git a/changelog.d/4122.bugfix b/changelog.d/4122.bugfix new file mode 100644 index 0000000000..66dcfb18b9 --- /dev/null +++ b/changelog.d/4122.bugfix @@ -0,0 +1 @@ +Searches that request profile info now no longer fail with a 500. 
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0c1d52fd11..80e7b15de8 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import serialize_event +from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -324,9 +325,12 @@ class SearchHandler(BaseHandler): else: last_event_id = event.event_id + state_filter = StateFilter.from_types( + [(EventTypes.Member, sender) for sender in senders] + ) + state = yield self.store.get_state_for_event( - last_event_id, - types=[(EventTypes.Member, sender) for sender in senders] + last_event_id, state_filter ) res["profile_info"] = { diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 359f7777ff..a824be9a62 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -23,7 +23,7 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer from synapse.api.constants import Membership -from synapse.rest.client.v1 import room +from synapse.rest.client.v1 import admin, login, room from tests import unittest @@ -799,3 +799,107 @@ class RoomMessageListTestCase(RoomBase): self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) + + +class RoomSearchTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def prepare(self, reactor, clock, hs): + + # Register the user who does the searching + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") + + # Register the user who sends the message + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + # Create a room + self.room = self.helper.create_room_as(self.user_id, tok=self.access_token) + + # Invite the other person + self.helper.invite( + room=self.room, + src=self.user_id, + tok=self.access_token, + targ=self.other_user_id, + ) + + # The other user joins + self.helper.join( + room=self.room, user=self.other_user_id, tok=self.other_access_token + ) + + def test_finds_message(self): + """ + The search functionality will search for content in messages if asked to + do so. + """ + # The other user sends some messages + self.helper.send(self.room, body="Hi!", tok=self.other_access_token) + self.helper.send(self.room, body="There!", tok=self.other_access_token) + + request, channel = self.make_request( + "POST", + "/search?access_token=%s" % (self.access_token,), + { + "search_categories": { + "room_events": {"keys": ["content.body"], "search_term": "Hi"} + } + }, + ) + self.render(request) + + # Check we get the results we expect -- one search result, of the sent + # messages + self.assertEqual(channel.code, 200) + results = channel.json_body["search_categories"]["room_events"] + self.assertEqual(results["count"], 1) + self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!") + + # No context was requested, so we should get none. 
+        self.assertEqual(results["results"][0]["context"], {})
+
+    def test_include_context(self):
+        """
+        When event_context includes include_profile, profile information will be
+        included in the search response.
+        """
+        # The other user sends some messages
+        self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
+        self.helper.send(self.room, body="There!", tok=self.other_access_token)
+
+        request, channel = self.make_request(
+            "POST",
+            "/search?access_token=%s" % (self.access_token,),
+            {
+                "search_categories": {
+                    "room_events": {
+                        "keys": ["content.body"],
+                        "search_term": "Hi",
+                        "event_context": {"include_profile": True},
+                    }
+                }
+            },
+        )
+        self.render(request)
+
+        # Check we get the results we expect -- one search result, of the sent
+        # messages
+        self.assertEqual(channel.code, 200)
+        results = channel.json_body["search_categories"]["room_events"]
+        self.assertEqual(results["count"], 1)
+        self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")
+
+        # We should get context info, like the two users, and the display names.
+        context = results["results"][0]["context"]
+        self.assertEqual(len(context["profile_info"].keys()), 2)
+        self.assertEqual(
+            context["profile_info"][self.other_user_id]["displayname"], "otheruser"
+        )
-- cgit 1.4.1
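The fix above works because get_state_for_event() no longer accepts a types= keyword argument; callers must now wrap their (event_type, state_key) pairs in a StateFilter, and search.py was still passing the old keyword, which surfaced as an HTTP 500. A self-contained toy sketch of that failure mode and the corrected call pattern — the StateFilter class below is a stand-in for synapse.storage.state.StateFilter, whose interface is assumed from the diff, and the user IDs are hypothetical:

    class StateFilter(object):
        """Toy stand-in for synapse.storage.state.StateFilter."""

        def __init__(self, types):
            self.types = types

        @classmethod
        def from_types(cls, types):
            # accepts an iterable of (event_type, state_key) pairs
            return cls(list(types))


    def get_state_for_event(event_id, state_filter):
        # New-style signature: a positional StateFilter, no types= kwarg.
        # Callers still passing types=[...] now get a TypeError, which is
        # what this patch fixes in the search handler.
        assert isinstance(state_filter, StateFilter)
        return {t: "<state event for %r>" % (t,) for t in state_filter.types}


    senders = ["@alice:example.com", "@bob:example.com"]
    state = get_state_for_event(
        "$lasteventid:example.com",
        StateFilter.from_types(("m.room.member", s) for s in senders),
    )
    print(state)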
From 67c1924899721e57093470d2f449f745a983b1d9 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 31 Oct 2018 06:27:05 +1100
Subject: version bump

---
 synapse/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'synapse')

diff --git a/synapse/__init__.py b/synapse/__init__.py
index c8377df7b2..97a57a7ac1 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass

-__version__ = "0.33.8rc1"
+__version__ = "0.33.8rc2"
-- cgit 1.4.1


From f79f45448527f22f3813e38233521a5e13e9223e Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 31 Oct 2018 22:29:02 +1100
Subject: Remove deprecated v1 key exchange endpoint (#4119)

---
 changelog.d/4119.removal                   |  1 +
 synapse/api/urls.py                        |  1 -
 synapse/app/homeserver.py                  |  7 +--
 synapse/rest/key/v1/__init__.py            | 14 -----
 synapse/rest/key/v1/server_key_resource.py | 92 ------------------------------
 5 files changed, 2 insertions(+), 113 deletions(-)
 create mode 100644 changelog.d/4119.removal
 delete mode 100644 synapse/rest/key/v1/__init__.py
 delete mode 100644 synapse/rest/key/v1/server_key_resource.py

(limited to 'synapse')

diff --git a/changelog.d/4119.removal b/changelog.d/4119.removal
new file mode 100644
index 0000000000..81383ece6b
--- /dev/null
+++ b/changelog.d/4119.removal
@@ -0,0 +1 @@
+The deprecated v1 key exchange endpoints have been removed.
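The rest of this patch deletes the /_matrix/key/v1 resource, leaving only the v2 key API registered. For context, a minimal stdlib-only sketch of fetching a server's signing keys from the surviving v2 endpoint — the hostname is hypothetical, and error handling and signature verification are deliberately omitted:

    import json
    from urllib.request import urlopen


    def fetch_server_keys_v2(server_name, key_id=""):
        # key_id may be left empty to request all of the server's current keys
        url = "https://%s/_matrix/key/v2/server/%s" % (server_name, key_id)
        with urlopen(url) as response:
            return json.load(response)


    keys = fetch_server_keys_v2("matrix.example.com")  # hypothetical server
    print(sorted(keys.get("verify_keys", {})))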
diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 6d9f1ca0ef..f78695b657 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1" STATIC_PREFIX = "/_matrix/static" WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" -SERVER_KEY_PREFIX = "/_matrix/key/v1" SERVER_KEY_V2_PREFIX = "/_matrix/key/v2" MEDIA_PREFIX = "/_matrix/media/r0" LEGACY_MEDIA_PREFIX = "/_matrix/media/v1" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 593e1e75db..415374a2ce 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -37,7 +37,6 @@ from synapse.api.urls import ( FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, - SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, STATIC_PREFIX, WEB_CLIENT_PREFIX, @@ -59,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource -from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer @@ -236,10 +234,7 @@ class SynapseHomeServer(HomeServer): ) if name in ["keys", "federation"]: - resources.update({ - SERVER_KEY_PREFIX: LocalKey(self), - SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self), - }) + resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) diff --git a/synapse/rest/key/v1/__init__.py b/synapse/rest/key/v1/__init__.py deleted file mode 100644 index fe0ac3f8e9..0000000000 --- a/synapse/rest/key/v1/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py deleted file mode 100644 index 38eb2ee23f..0000000000 --- a/synapse/rest/key/v1/server_key_resource.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging - -from canonicaljson import encode_canonical_json -from signedjson.sign import sign_json -from unpaddedbase64 import encode_base64 - -from OpenSSL import crypto -from twisted.web.resource import Resource - -from synapse.http.server import respond_with_json_bytes - -logger = logging.getLogger(__name__) - - -class LocalKey(Resource): - """HTTP resource containing encoding the TLS X.509 certificate and NACL - signature verification keys for this server:: - - GET /key HTTP/1.1 - - HTTP/1.1 200 OK - Content-Type: application/json - { - "server_name": "this.server.example.com" - "verify_keys": { - "algorithm:version": # base64 encoded NACL verification key. - }, - "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert. - "signatures": { - "this.server.example.com": { - "algorithm:version": # NACL signature for this server. - } - } - } - """ - - def __init__(self, hs): - self.response_body = encode_canonical_json( - self.response_json_object(hs.config) - ) - Resource.__init__(self) - - @staticmethod - def response_json_object(server_config): - verify_keys = {} - for key in server_config.signing_key: - verify_key_bytes = key.verify_key.encode() - key_id = "%s:%s" % (key.alg, key.version) - verify_keys[key_id] = encode_base64(verify_key_bytes) - - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, - server_config.tls_certificate - ) - json_object = { - u"server_name": server_config.server_name, - u"verify_keys": verify_keys, - u"tls_certificate": encode_base64(x509_certificate_bytes) - } - for key in server_config.signing_key: - json_object = sign_json( - json_object, - server_config.server_name, - key, - ) - - return json_object - - def render_GET(self, request): - return respond_with_json_bytes( - request, 200, self.response_body, - ) - - def getChild(self, name, request): - if name == b'': - return self -- cgit 1.4.1 From 916efc824950f924c3f7bced09b9cd5759b1532e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 23:14:39 +1100 Subject: Remove fetching keys via the deprecated v1 kex method (#4120) --- changelog.d/4120.removal | 1 + synapse/crypto/keyclient.py | 8 ++-- synapse/crypto/keyring.py | 110 +++----------------------------------------- 3 files changed, 13 insertions(+), 106 deletions(-) create mode 100644 changelog.d/4120.removal (limited to 'synapse') diff --git a/changelog.d/4120.removal b/changelog.d/4120.removal new file mode 100644 index 0000000000..a7a567098f --- /dev/null +++ b/changelog.d/4120.removal @@ -0,0 +1 @@ +Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. 
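The keyclient.py change below interpolates the requested key ID into the URL path, which is why it is passed through urllib.parse.quote first: key IDs take the form "<algorithm>:<version>", and the colon (or any other reserved character in the version string) must be percent-encoded rather than interpreted as URL syntax. A quick illustration — plain urllib.parse here for brevity, whereas the diff itself uses six.moves.urllib for Python 2/3 compatibility; the key ID is hypothetical:

    from urllib.parse import quote

    KEY_API_V2 = "/_matrix/key/v2/server/%s"

    key_id = "ed25519:a_key_id"  # hypothetical "<algorithm>:<version>" key ID
    print(KEY_API_V2 % (quote(key_id),))
    # prints: /_matrix/key/v2/server/ed25519%3Aa_key_id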
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 080c81f14b..d40e4b8591 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -15,6 +15,8 @@ import logging +from six.moves import urllib + from canonicaljson import json from twisted.internet import defer, reactor @@ -28,15 +30,15 @@ from synapse.util import logcontext logger = logging.getLogger(__name__) -KEY_API_V1 = b"/_matrix/key/v1/" +KEY_API_V2 = "/_matrix/key/v2/server/%s" @defer.inlineCallbacks -def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): +def fetch_server_key(server_name, tls_client_options_factory, key_id): """Fetch the keys for a remote server.""" factory = SynapseKeyClientFactory() - factory.path = path + factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), ) factory.host = server_name endpoint = matrix_federation_endpoint( reactor, server_name, tls_client_options_factory, timeout=30 diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d89f94c219..515ebbc148 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2017 New Vector Ltd. +# Copyright 2017, 2018 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,8 +18,6 @@ import hashlib import logging from collections import namedtuple -from six.moves import urllib - from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -395,32 +393,13 @@ class Keyring(object): @defer.inlineCallbacks def get_keys_from_server(self, server_name_and_key_ids): - @defer.inlineCallbacks - def get_key(server_name, key_ids): - keys = None - try: - keys = yield self.get_server_verify_key_v2_direct( - server_name, key_ids - ) - except Exception as e: - logger.info( - "Unable to get key %r for %r directly: %s %s", - key_ids, server_name, - type(e).__name__, str(e), - ) - - if not keys: - keys = yield self.get_server_verify_key_v1_direct( - server_name, key_ids - ) - - keys = {server_name: keys} - - defer.returnValue(keys) - results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - run_in_background(get_key, server_name, key_ids) + run_in_background( + self.get_server_verify_key_v2_direct, + server_name, + key_ids, + ) for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, @@ -525,10 +504,7 @@ class Keyring(object): continue (response, tls_certificate) = yield fetch_server_key( - server_name, self.hs.tls_client_options_factory, - path=("/_matrix/key/v2/server/%s" % ( - urllib.parse.quote(requested_key_id), - )).encode("ascii"), + server_name, self.hs.tls_client_options_factory, requested_key_id ) if (u"signatures" not in response @@ -657,78 +633,6 @@ class Keyring(object): defer.returnValue(results) - @defer.inlineCallbacks - def get_server_verify_key_v1_direct(self, server_name, key_ids): - """Finds a verification key for the server with one of the key ids. - Args: - server_name (str): The name of the server to fetch a key for. - keys_ids (list of str): The key_ids to check for. - """ - - # Try to fetch the key from the remote server. - - (response, tls_certificate) = yield fetch_server_key( - server_name, self.hs.tls_client_options_factory - ) - - # Check the response. 
- - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, tls_certificate - ) - - if ("signatures" not in response - or server_name not in response["signatures"]): - raise KeyLookupError("Key response not signed by remote server") - - if "tls_certificate" not in response: - raise KeyLookupError("Key response missing TLS certificate") - - tls_certificate_b64 = response["tls_certificate"] - - if encode_base64(x509_certificate_bytes) != tls_certificate_b64: - raise KeyLookupError("TLS certificate doesn't match") - - # Cache the result in the datastore. - - time_now_ms = self.clock.time_msec() - - verify_keys = {} - for key_id, key_base64 in response["verify_keys"].items(): - if is_signing_algorithm_supported(key_id): - key_bytes = decode_base64(key_base64) - verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_key.time_added = time_now_ms - verify_keys[key_id] = verify_key - - for key_id in response["signatures"][server_name]: - if key_id not in response["verify_keys"]: - raise KeyLookupError( - "Key response must include verification keys for all" - " signatures" - ) - if key_id in verify_keys: - verify_signed_json( - response, - server_name, - verify_keys[key_id] - ) - - yield self.store.store_server_certificate( - server_name, - server_name, - time_now_ms, - tls_certificate, - ) - - yield self.store_keys( - server_name=server_name, - from_server=server_name, - verify_keys=verify_keys, - ) - - defer.returnValue(verify_keys) - def store_keys(self, server_name, from_server, verify_keys): """Store a collection of verify keys for a given server Args: -- cgit 1.4.1 From 9b827c40ca71510390c92472f7ec5cfcff9e69b2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 31 Oct 2018 15:42:23 +0000 Subject: Log some bits about event creation (#4121) I found these helpful in debugging my room upgrade tests. --- changelog.d/4121.misc | 1 + synapse/handlers/message.py | 3 +++ synapse/handlers/room.py | 4 ++++ 3 files changed, 8 insertions(+) create mode 100644 changelog.d/4121.misc (limited to 'synapse') diff --git a/changelog.d/4121.misc b/changelog.d/4121.misc new file mode 100644 index 0000000000..9c29d80c3f --- /dev/null +++ b/changelog.d/4121.misc @@ -0,0 +1 @@ +Log some bits about room creation diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 969e588e73..a7cd779b02 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -427,6 +427,9 @@ class EventCreationHandler(object): if event.is_state(): prev_state = yield self.deduplicate_state_event(event, context) + logger.info( + "Not bothering to persist duplicate state event %s", event.event_id, + ) if prev_state is not None: defer.returnValue(prev_state) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1d9417ff1a..fe960342b9 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -104,6 +104,8 @@ class RoomCreationHandler(BaseHandler): creator_id=user_id, is_public=r["is_public"], ) + logger.info("Creating new room %s to replace %s", new_room_id, old_room_id) + # we create and auth the tombstone event before properly creating the new # room, to check our user has perms in the old room. 
tombstone_event, tombstone_context = ( @@ -522,6 +524,7 @@ class RoomCreationHandler(BaseHandler): @defer.inlineCallbacks def send(etype, content, **kwargs): event = create(etype, content, **kwargs) + logger.info("Sending %s in new room", etype) yield self.event_creation_handler.create_and_send_nonmember_event( creator, event, @@ -544,6 +547,7 @@ class RoomCreationHandler(BaseHandler): content=creation_content, ) + logger.info("Sending %s in new room", EventTypes.Member) yield self.room_member_handler.update_membership( creator, creator.user, -- cgit 1.4.1 From 94c7fadc98542d582ff67c5ac788081c0d836e6b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Oct 2018 15:11:35 +0100 Subject: Attempt to move room aliases on room upgrades --- changelog.d/4101.feature | 1 + synapse/handlers/directory.py | 34 +++++++++--- synapse/handlers/room.py | 121 +++++++++++++++++++++++++++++++++++++++--- 3 files changed, 142 insertions(+), 14 deletions(-) create mode 100644 changelog.d/4101.feature (limited to 'synapse') diff --git a/changelog.d/4101.feature b/changelog.d/4101.feature new file mode 100644 index 0000000000..a3f7dbdcdd --- /dev/null +++ b/changelog.d/4101.feature @@ -0,0 +1 @@ +Support for replacing rooms with new ones diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 7d67bf803a..0699731c13 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -138,9 +138,30 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def delete_association(self, requester, room_alias): - # association deletion for human users + def delete_association(self, requester, room_alias, send_event=True): + """Remove an alias from the directory + (this is only meant for human users; AS users should call + delete_appservice_association) + + Args: + requester (Requester): + room_alias (RoomAlias): + send_event (bool): Whether to send an updated m.room.aliases event. + Note that, if we delete the canonical alias, we will always attempt + to send an m.room.canonical_alias event + + Returns: + Deferred[unicode]: room id that the alias used to point to + + Raises: + NotFoundError: if the alias doesn't exist + + AuthError: if the user doesn't have perms to delete the alias (ie, the user + is neither the creator of the alias, nor a server admin. + + SynapseError: if the alias belongs to an AS + """ user_id = requester.user.to_string() try: @@ -168,10 +189,11 @@ class DirectoryHandler(BaseHandler): room_id = yield self._delete_association(room_alias) try: - yield self.send_room_alias_update_event( - requester, - room_id - ) + if send_event: + yield self.send_room_alias_update_event( + requester, + room_id + ) yield self._update_canonical_alias( requester, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1d9417ff1a..76811050a6 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,10 +136,15 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) - # and finally, shut down the PLs in the old room, and update them in the new - # room. old_room_state = yield tombstone_context.get_current_state_ids(self.store) + # update any aliases + yield self._move_aliases_to_new_room( + requester, old_room_id, new_room_id, old_room_state, + ) + + # and finally, shut down the PLs in the old room, and update them in the new + # room. 
yield self._update_upgraded_room_pls( requester, old_room_id, new_room_id, old_room_state, ) @@ -245,11 +250,6 @@ class RoomCreationHandler(BaseHandler): if not self.spam_checker.user_may_create_room(user_id): raise SynapseError(403, "You are not permitted to create rooms") - # XXX check alias is free - # canonical_alias = None - - # XXX create association in directory handler - creation_content = { "room_version": new_room_version, "predecessor": { @@ -295,7 +295,112 @@ class RoomCreationHandler(BaseHandler): # XXX invites/joins # XXX 3pid invites - # XXX directory_handler.send_room_alias_update_event + + @defer.inlineCallbacks + def _move_aliases_to_new_room( + self, requester, old_room_id, new_room_id, old_room_state, + ): + directory_handler = self.hs.get_handlers().directory_handler + + aliases = yield self.store.get_aliases_for_room(old_room_id) + + # check to see if we have a canonical alias. + canonical_alias = None + canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, "")) + if canonical_alias_event_id: + canonical_alias_event = yield self.store.get_event(canonical_alias_event_id) + if canonical_alias_event: + canonical_alias = canonical_alias_event.content.get("alias", "") + + # first we try to remove the aliases from the old room (we suppress sending + # the room_aliases event until the end). + # + # Note that we'll only be able to remove aliases that (a) aren't owned by an AS, + # and (b) unless the user is a server admin, which the user created. + # + # This is probably correct - given we don't allow such aliases to be deleted + # normally, it would be odd to allow it in the case of doing a room upgrade - + # but it makes the upgrade less effective, and you have to wonder why a room + # admin can't remove aliases that point to that room anyway. + # (cf https://github.com/matrix-org/synapse/issues/2360) + # + removed_aliases = [] + for alias_str in aliases: + alias = RoomAlias.from_string(alias_str) + try: + yield directory_handler.delete_association( + requester, alias, send_event=False, + ) + except SynapseError as e: + logger.warning( + "Unable to remove alias %s from old room: %s", + alias, e, + ) + else: + removed_aliases.append(alias_str) + + # if we didn't find any aliases, or couldn't remove anyway, we can skip the rest + # of this. + if not removed_aliases: + return + + try: + # this can fail if, for some reason, our user doesn't have perms to send + # m.room.aliases events in the old room (note that we've already checked that + # they have perms to send a tombstone event, so that's not terribly likely). + # + # If that happens, it's regrettable, but we should carry on: it's the same + # as when you remove an alias from the directory normally - it just means that + # the aliases event gets out of sync with the directory + # (cf https://github.com/vector-im/riot-web/issues/2369) + yield directory_handler.send_room_alias_update_event( + requester, old_room_id, + ) + except AuthError as e: + logger.warning( + "Failed to send updated alias event on old room: %s", e, + ) + + # we can now add any aliases we successfully removed to the new room. + for alias in removed_aliases: + try: + yield directory_handler.create_association( + requester, RoomAlias.from_string(alias), + new_room_id, servers=(self.hs.hostname, ), + send_event=False, + ) + logger.info("Moved alias %s to new room", alias) + except SynapseError as e: + # I'm not really expecting this to happen, but it could if the spam + # checking module decides it shouldn't, or similar. 
+ logger.error( + "Error adding alias %s to new room: %s", + alias, e, + ) + + try: + if canonical_alias and (canonical_alias in removed_aliases): + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.CanonicalAlias, + "state_key": "", + "room_id": new_room_id, + "sender": requester.user.to_string(), + "content": {"alias": canonical_alias, }, + }, + ratelimit=False + ) + + yield directory_handler.send_room_alias_update_event( + requester, new_room_id, + ) + except SynapseError as e: + # again I'm not really expecting this to fail, but if it does, I'd rather + # we returned the new room to the client at this point. + logger.error( + "Unable to send updated alias events in new room: %s", e, + ) @defer.inlineCallbacks def create_room(self, requester, config, ratelimit=True, -- cgit 1.4.1 From 0f8591a5a8695aa176736c651a361c40cf228b6d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 29 Oct 2018 15:20:19 +0000 Subject: Avoid else clause on exception for clarity --- synapse/handlers/room.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 76811050a6..9ff4656717 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -331,13 +331,12 @@ class RoomCreationHandler(BaseHandler): yield directory_handler.delete_association( requester, alias, send_event=False, ) + removed_aliases.append(alias_str) except SynapseError as e: logger.warning( "Unable to remove alias %s from old room: %s", alias, e, ) - else: - removed_aliases.append(alias_str) # if we didn't find any aliases, or couldn't remove anyway, we can skip the rest # of this. -- cgit 1.4.1 From a8d41c6aff0e58fc24fae1fe4ae89d28541a63cb Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 31 Oct 2018 13:19:28 -0600 Subject: Include a version query string arg for the consent route --- synapse/handlers/auth.py | 5 ++++- synapse/rest/client/v2_alpha/auth.py | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'synapse') diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d143522d9a..85fc1fc525 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -473,7 +473,10 @@ class AuthHandler(BaseHandler): "version": self.hs.config.user_consent_version, "en": { "name": "Privacy Policy", - "url": "%s/_matrix/consent" % (self.hs.config.public_baseurl,), + "url": "%s/_matrix/consent?v=%s" % ( + self.hs.config.public_baseurl, + self.hs.config.user_consent_version, + ), }, }, }, diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 6f90935b22..a8d8ed6590 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -161,8 +161,9 @@ class AuthRestServlet(RestServlet): html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent" % ( + 'terms_url': "%s/_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, + self.hs.config.user_consent_version, ), 'myurl': "%s/auth/%s/fallback/web" % ( CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS @@ -241,8 +242,9 @@ class AuthRestServlet(RestServlet): else: html = TERMS_TEMPLATE % { 'session': session, - 'terms_url': "%s/_matrix/consent" % ( + 'terms_url': "%s/_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, + self.hs.config.user_consent_version, ), 'myurl': "%s/auth/%s/fallback/web" % ( CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS -- cgit 1.4.1 From aa98e3889667bbbd4186f25336e1a9a5666113d2 Mon Sep 17 
00:00:00 2001 From: Amber Brown Date: Thu, 1 Nov 2018 21:28:35 +1100 Subject: version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/__init__.py b/synapse/__init__.py index 97a57a7ac1..89ea9a9775 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.33.8rc2" +__version__ = "0.33.8" -- cgit 1.4.1 From b3dd6fa98199f58804df903195b6c5a1cc4686d7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Nov 2018 11:43:46 +0000 Subject: Add STATE_V2_TEST room version --- synapse/api/constants.py | 7 ++++++- synapse/state/__init__.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 5565e516d6..e63b1e8a38 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -102,6 +102,7 @@ class ThirdPartyEntityKind(object): class RoomVersions(object): V1 = "1" VDH_TEST = "vdh-test-version" + STATE_V2_TEST = "state-v2-test" # the version we will give rooms which are created on this server @@ -109,7 +110,11 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1 # vdh-test-version is a placeholder to get room versioning support working and tested # until we have a working v2. -KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST} +KNOWN_ROOM_VERSIONS = { + RoomVersions.V1, + RoomVersions.VDH_TEST, + RoomVersions.STATE_V2_TEST, +} ServerNoticeMsgType = "m.server_notice" ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9b40b18d5b..943d5d6bb5 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -607,7 +607,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto return v1.resolve_events_with_store( state_sets, event_map, state_res_store.get_events, ) - elif room_version == RoomVersions.VDH_TEST: + elif room_version in (RoomVersions.VDH_TEST, RoomVersions.STATE_V2_TEST): return v2.resolve_events_with_store( state_sets, event_map, state_res_store, ) -- cgit 1.4.1 From 642505abc385afe7849f60c37f8ef99592f8f7b4 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 1 Nov 2018 16:47:05 -0600 Subject: Fix logic error that prevented guests from seeing the privacy policy --- synapse/rest/consent/consent_resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 4cadd71d7e..89b82b0591 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -141,7 +141,7 @@ class ConsentResource(Resource): username = parse_string(request, "u", required=False, default="") userhmac = None has_consented = False - public_version = username != "" + public_version = username == "" if not public_version: userhmac = parse_string(request, "h", required=True, encoding=None) -- cgit 1.4.1 From 54aec35867d24bafa870e5b437a11b9a0c502658 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Nov 2018 10:29:19 +0000 Subject: Fix None exception in state res v2 --- synapse/state/v2.py | 4 ++ tests/state/test_v2.py | 100 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) (limited to 'synapse') diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 5d06f7e928..dbc9688c56 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -53,6 
+53,10 @@ def resolve_events_with_store(state_sets, event_map, state_res_store): logger.debug("Computing conflicted state") + # We use event_map as a cache, so if its None we need to initialize it + if event_map is None: + event_map = {} + # First split up the un/conflicted state unconflicted_state, conflicted_state = _seperate(state_sets) diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index efd85ebe6c..d67f59b2c7 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -544,8 +544,7 @@ class StateTestCase(unittest.TestCase): state_res_store=TestStateResolutionStore(event_map), ) - self.assertTrue(state_d.called) - state_before = state_d.result + state_before = self.successResultOf(state_d) state_after = dict(state_before) if fake_event.state_key is not None: @@ -599,6 +598,103 @@ class LexicographicalTestCase(unittest.TestCase): self.assertEqual(["o", "l", "n", "m", "p"], res) +class SimpleParamStateTestCase(unittest.TestCase): + def setUp(self): + # We build up a simple DAG. + + event_map = {} + + create_event = FakeEvent( + id="CREATE", + sender=ALICE, + type=EventTypes.Create, + state_key="", + content={"creator": ALICE}, + ).to_event([], []) + event_map[create_event.event_id] = create_event + + alice_member = FakeEvent( + id="IMA", + sender=ALICE, + type=EventTypes.Member, + state_key=ALICE, + content=MEMBERSHIP_CONTENT_JOIN, + ).to_event([create_event.event_id], [create_event.event_id]) + event_map[alice_member.event_id] = alice_member + + join_rules = FakeEvent( + id="IJR", + sender=ALICE, + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.PUBLIC}, + ).to_event( + auth_events=[create_event.event_id, alice_member.event_id], + prev_events=[alice_member.event_id], + ) + event_map[join_rules.event_id] = join_rules + + # Bob and Charlie join at the same time, so there is a fork + bob_member = FakeEvent( + id="IMB", + sender=BOB, + type=EventTypes.Member, + state_key=BOB, + content=MEMBERSHIP_CONTENT_JOIN, + ).to_event( + auth_events=[create_event.event_id, join_rules.event_id], + prev_events=[join_rules.event_id], + ) + event_map[bob_member.event_id] = bob_member + + charlie_member = FakeEvent( + id="IMC", + sender=CHARLIE, + type=EventTypes.Member, + state_key=CHARLIE, + content=MEMBERSHIP_CONTENT_JOIN, + ).to_event( + auth_events=[create_event.event_id, join_rules.event_id], + prev_events=[join_rules.event_id], + ) + event_map[charlie_member.event_id] = charlie_member + + self.event_map = event_map + self.create_event = create_event + self.alice_member = alice_member + self.join_rules = join_rules + self.bob_member = bob_member + self.charlie_member = charlie_member + + self.state_at_bob = { + (e.type, e.state_key): e.event_id + for e in [create_event, alice_member, join_rules, bob_member] + } + + self.state_at_charlie = { + (e.type, e.state_key): e.event_id + for e in [create_event, alice_member, join_rules, charlie_member] + } + + self.expected_combined_state = { + (e.type, e.state_key): e.event_id + for e in [create_event, alice_member, join_rules, bob_member, charlie_member] + } + + def test_event_map_none(self): + # Test that we correctly handle passing `None` as the event_map + + state_d = resolve_events_with_store( + [self.state_at_bob, self.state_at_charlie], + event_map=None, + state_res_store=TestStateResolutionStore(self.event_map), + ) + + state = self.successResultOf(state_d) + + self.assert_dict(self.expected_combined_state, state) + + def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." 
a, b = itertools.tee(iterable) -- cgit 1.4.1 From 50e328d1e7a61268dbf274f10798ea5994c6c25a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Nov 2018 19:01:29 +0000 Subject: Remove redundant database locks for device list updates We can rely on the application-level per-user linearizer. --- synapse/storage/devices.py | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 62497ab63f..bc3f575b1e 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -239,7 +239,19 @@ class DeviceStore(SQLBaseStore): def update_remote_device_list_cache_entry(self, user_id, device_id, content, stream_id): - """Updates a single user's device in the cache. + """Updates a single device in the cache of a remote user's devicelist. + + Note: assumes that we are the only thread that can be updating this user's + device list. + + Args: + user_id (str): User to update device list for + device_id (str): ID of decivice being updated + content (dict): new data on this device + stream_id (int): the version of the device list + + Returns: + Deferred[None] """ return self.runInteraction( "update_remote_device_list_cache_entry", @@ -272,7 +284,11 @@ class DeviceStore(SQLBaseStore): }, values={ "content": json.dumps(content), - } + }, + + # we don't need to lock, because we assume we are the only thread + # updating this user's devices. + lock=False, ) txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,)) @@ -289,11 +305,26 @@ class DeviceStore(SQLBaseStore): }, values={ "stream_id": stream_id, - } + }, + + # again, we can assume we are the only thread updating this user's + # extremity. + lock=False, ) def update_remote_device_list_cache(self, user_id, devices, stream_id): - """Replace the cache of the remote user's devices. + """Replace the entire cache of the remote user's devices. + + Note: assumes that we are the only thread that can be updating this user's + device list. + + Args: + user_id (str): User to update device list for + devices (list[dict]): list of device objects supplied over federation + stream_id (int): the version of the device list + + Returns: + Deferred[None] """ return self.runInteraction( "update_remote_device_list_cache", @@ -338,7 +369,11 @@ class DeviceStore(SQLBaseStore): }, values={ "stream_id": stream_id, - } + }, + + # we don't need to lock, because we can assume we are the only thread + # updating this user's extremity. + lock=False, ) def get_devices_by_remote(self, destination, from_stream_id): -- cgit 1.4.1 From 350f654e7b80ff19d1fdf3861093cef09ccf3ab1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Nov 2018 19:10:33 +0000 Subject: Add unique indexes to a couple of tables The indexes on device_lists_remote_extremeties can be unique, and they therefore should, to ensure that the db remains consistent. 
--- synapse/storage/devices.py | 49 +++++++++++++++++++++- .../schema/delta/40/device_list_streams.sql | 9 ++-- .../delta/52/device_list_streams_unique_idx.sql | 36 ++++++++++++++++ 3 files changed, 88 insertions(+), 6 deletions(-) create mode 100644 synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql (limited to 'synapse') diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index bc3f575b1e..ecdab34e7d 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -22,14 +22,19 @@ from twisted.internet import defer from synapse.api.errors import StoreError from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.background_updates import BackgroundUpdateStore from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList -from ._base import Cache, SQLBaseStore, db_to_json +from ._base import Cache, db_to_json logger = logging.getLogger(__name__) +DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = ( + "drop_device_list_streams_non_unique_indexes" +) -class DeviceStore(SQLBaseStore): + +class DeviceStore(BackgroundUpdateStore): def __init__(self, db_conn, hs): super(DeviceStore, self).__init__(db_conn, hs) @@ -52,6 +57,30 @@ class DeviceStore(SQLBaseStore): columns=["user_id", "device_id"], ) + # create a unique index on device_lists_remote_cache + self.register_background_index_update( + "device_lists_remote_cache_unique_idx", + index_name="device_lists_remote_cache_unique_id", + table="device_lists_remote_cache", + columns=["user_id", "device_id"], + unique=True, + ) + + # And one on device_lists_remote_extremeties + self.register_background_index_update( + "device_lists_remote_extremeties_unique_idx", + index_name="device_lists_remote_extremeties_unique_idx", + table="device_lists_remote_extremeties", + columns=["user_id"], + unique=True, + ) + + # once they complete, we can remove the old non-unique indexes. + self.register_background_update_handler( + DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES, + self._drop_device_list_streams_non_unique_indexes, + ) + @defer.inlineCallbacks def store_device(self, user_id, device_id, initial_device_display_name): @@ -757,3 +786,19 @@ class DeviceStore(SQLBaseStore): "_prune_old_outbound_device_pokes", _prune_txn, ) + + @defer.inlineCallbacks + def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size): + def f(conn): + txn = conn.cursor() + txn.execute( + "DROP INDEX IF EXISTS device_lists_remote_cache_id" + ) + txn.execute( + "DROP INDEX IF EXISTS device_lists_remote_extremeties_id" + ) + txn.close() + + yield self.runWithConnection(f) + yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES) + defer.returnValue(1) diff --git a/synapse/storage/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/delta/40/device_list_streams.sql index 54841b3843..dd6dcb65f1 100644 --- a/synapse/storage/schema/delta/40/device_list_streams.sql +++ b/synapse/storage/schema/delta/40/device_list_streams.sql @@ -20,9 +20,6 @@ CREATE TABLE device_lists_remote_cache ( content TEXT NOT NULL ); -CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id); - - -- The last update we got for a user. Empty if we're not receiving updates for -- that user. 
CREATE TABLE device_lists_remote_extremeties ( @@ -30,7 +27,11 @@ CREATE TABLE device_lists_remote_extremeties ( stream_id TEXT NOT NULL ); -CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id); +-- we used to create non-unique indexes on these tables, but as of update 52 we create +-- unique indexes concurrently: +-- +-- CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id); +-- CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id); -- Stream of device lists updates. Includes both local and remotes diff --git a/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql new file mode 100644 index 0000000000..bfa49e6f92 --- /dev/null +++ b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql @@ -0,0 +1,36 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- register a background update which will create a unique index on +-- device_lists_remote_cache +INSERT into background_updates (update_name, progress_json) + VALUES ('device_lists_remote_cache_unique_idx', '{}'); + +-- and one on device_lists_remote_extremeties +INSERT into background_updates (update_name, progress_json, depends_on) + VALUES ( + 'device_lists_remote_extremeties_unique_idx', '{}', + + -- doesn't really depend on this, but we need to make sure both happen + -- before we drop the old indexes. + 'device_lists_remote_cache_unique_idx' + ); + +-- once they complete, we can drop the old indexes. +INSERT into background_updates (update_name, progress_json, depends_on) + VALUES ( + 'drop_device_list_streams_non_unique_indexes', '{}', + 'device_lists_remote_extremeties_unique_idx' + ); -- cgit 1.4.1 From cb7a6b2379e0e0a4ba8043da98e376b45d05b977 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 3 Nov 2018 00:19:23 +1100 Subject: Fix typing being reset causing infinite syncs (#4127) --- changelog.d/4127.bugfix | 1 + synapse/app/synchrotron.py | 14 ++++ synapse/handlers/typing.py | 14 ++-- tests/rest/client/v2_alpha/test_sync.py | 123 ++++++++++++++++++++++++++++++++ tests/server.py | 8 ++- 5 files changed, 155 insertions(+), 5 deletions(-) create mode 100644 changelog.d/4127.bugfix (limited to 'synapse') diff --git a/changelog.d/4127.bugfix b/changelog.d/4127.bugfix new file mode 100644 index 0000000000..0701d2ceaa --- /dev/null +++ b/changelog.d/4127.bugfix @@ -0,0 +1 @@ +If the typing stream ID goes backwards (as on a worker when the master restarts), the worker's typing handler will no longer erroneously report rooms containing new typing events. 
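The diffs that follow implement the changelog entry above. The essence of the bug: a worker caches per-room typing state keyed off a monotonically increasing serial, so if the master restarts and the serial resets, cached entries sit above the new token and every /sync reports them as fresh forever. A self-contained toy model of the invariant the fix enforces — names and data shapes are illustrative only; the real logic lives in SynchrotronTyping and TypingHandler as shown below:

    class TypingCache(object):
        """Toy model of the worker-side typing cache."""

        def __init__(self):
            self._latest_room_serial = 0
            self._reset()

        def _reset(self):
            # map room IDs to the serial at which they last changed
            self._room_serials = {}
            # map room IDs to sets of users currently typing
            self._room_typing = {}

        def process_replication_rows(self, token, rows):
            if self._latest_room_serial > token:
                # The master has gone backwards: drop everything rather than
                # keep serials from a previous epoch of the counter.
                self._reset()
            self._latest_room_serial = token
            for room_id, typing_user_ids in rows:
                self._room_serials[room_id] = token
                self._room_typing[room_id] = set(typing_user_ids)

        def rooms_changed_since(self, since_token):
            return [r for r, s in self._room_serials.items() if s > since_token]


    cache = TypingCache()
    cache.process_replication_rows(5, [("!room:example.com", {"@alice:example.com"})])
    assert cache.rooms_changed_since(4) == ["!room:example.com"]

    # The master restarts and its serial counter resets to zero...
    cache.process_replication_rows(0, [])
    # ...and the worker no longer reports the stale typing events as new.
    assert cache.rooms_changed_since(4) == []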
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 3926c7f263..0354e82bf8 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -226,7 +226,15 @@ class SynchrotronPresence(object): class SynchrotronTyping(object): def __init__(self, hs): self._latest_room_serial = 0 + self._reset() + + def _reset(self): + """ + Reset the typing handler's data caches. + """ + # map room IDs to serial numbers self._room_serials = {} + # map room IDs to sets of users currently typing self._room_typing = {} def stream_positions(self): @@ -236,6 +244,12 @@ class SynchrotronTyping(object): return {"typing": self._latest_room_serial} def process_replication_rows(self, token, rows): + if self._latest_room_serial > token: + # The master has gone backwards. To prevent inconsistent data, just + # clear everything. + self._reset() + + # Set the latest serial token to whatever the server gave us. self._latest_room_serial = token for row in rows: diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index c610933dd4..a61bbf9392 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -63,11 +63,8 @@ class TypingHandler(object): self._member_typing_until = {} # clock time we expect to stop self._member_last_federation_poke = {} - # map room IDs to serial numbers - self._room_serials = {} self._latest_room_serial = 0 - # map room IDs to sets of users currently typing - self._room_typing = {} + self._reset() # caches which room_ids changed at which serials self._typing_stream_change_cache = StreamChangeCache( @@ -79,6 +76,15 @@ class TypingHandler(object): 5000, ) + def _reset(self): + """ + Reset the typing handler's data caches. + """ + # map room IDs to serial numbers + self._room_serials = {} + # map room IDs to sets of users currently typing + self._room_typing = {} + def _handle_timeouts(self): logger.info("Checking for typing timeouts") diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 4c30c5f258..99b716f00a 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -15,9 +15,11 @@ from mock import Mock +from synapse.rest.client.v1 import admin, login, room from synapse.rest.client.v2_alpha import sync from tests import unittest +from tests.server import TimedOutException class FilterTestCase(unittest.HomeserverTestCase): @@ -65,3 +67,124 @@ class FilterTestCase(unittest.HomeserverTestCase): ["next_batch", "rooms", "account_data", "to_device", "device_lists"] ).issubset(set(channel.json_body.keys())) ) + + +class SyncTypingTests(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + user_id = True + hijack_auth = False + + def test_sync_backwards_typing(self): + """ + If the typing serial goes backwards and the typing handler is then reset + (such as when the master restarts and sets the typing serial to 0), we + do not incorrectly return typing information that had a serial greater + than the now-reset serial. 
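+        (The test stands in for a master restart by poking the handler's serial back to 0 directly.)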
+ """ + typing_url = "/rooms/%s/typing/%s?access_token=%s" + sync_url = "/sync?timeout=3000000&access_token=%s&since=%s" + + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Invite the other person + self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) + + # The other user joins + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends some messages + self.helper.send(room, body="Hi!", tok=other_access_token) + self.helper.send(room, body="There!", tok=other_access_token) + + # Start typing. + request, channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": true, "timeout": 30000}', + ) + self.render(request) + self.assertEquals(200, channel.code) + + request, channel = self.make_request( + "GET", "/sync?access_token=%s" % (access_token,) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # Stop typing. + request, channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": false}', + ) + self.render(request) + self.assertEquals(200, channel.code) + + # Start typing. + request, channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": true, "timeout": 30000}', + ) + self.render(request) + self.assertEquals(200, channel.code) + + # Should return immediately + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # Reset typing serial back to 0, as if the master had. + typing = self.hs.get_typing_handler() + typing._latest_room_serial = 0 + + # Since it checks the state token, we need some state to update to + # invalidate the stream token. + self.helper.send(room, body="There!", tok=other_access_token) + + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # This should time out! But it does not, because our stream token is + # ahead, and therefore it's saying the typing (that we've actually + # already seen) is new, since it's got a token above our new, now-reset + # stream token. + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.render(request) + self.assertEquals(200, channel.code) + next_batch = channel.json_body["next_batch"] + + # Clear the typing information, so that it doesn't think everything is + # in the future. + typing._reset() + + # Now it SHOULD fail as it never completes! + request, channel = self.make_request( + "GET", sync_url % (access_token, next_batch) + ) + self.assertRaises(TimedOutException, self.render, request) diff --git a/tests/server.py b/tests/server.py index 819c854448..cc6dbe04ac 100644 --- a/tests/server.py +++ b/tests/server.py @@ -21,6 +21,12 @@ from synapse.util import Clock from tests.utils import setup_test_homeserver as _sth +class TimedOutException(Exception): + """ + A web query timed out. 
+ """ + + @attr.s class FakeChannel(object): """ @@ -153,7 +159,7 @@ def wait_until_result(clock, request, timeout=100): x += 1 if x > timeout: - raise Exception("Timed out waiting for request to finish.") + raise TimedOutException("Timed out waiting for request to finish.") clock.advance(0.1) -- cgit 1.4.1 From b86d05a279051b59d711c5a1982474994f302803 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Nov 2018 13:44:12 +0000 Subject: Clean up event accesses and tests This is in preparation to refactor FrozenEvent to support different event formats for different room versions --- synapse/federation/units.py | 3 --- synapse/push/httppusher.py | 4 ++-- synapse/push/push_rule_evaluator.py | 4 ++-- tests/replication/slave/storage/test_events.py | 4 ++-- tests/test_federation.py | 2 +- 5 files changed, 7 insertions(+), 10 deletions(-) (limited to 'synapse') diff --git a/synapse/federation/units.py b/synapse/federation/units.py index c5ab14314e..025a79c022 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -117,9 +117,6 @@ class Transaction(JsonEncodedObject): "Require 'transaction_id' to construct a Transaction" ) - for p in pdus: - p.transaction_id = kwargs["transaction_id"] - kwargs["pdus"] = [p.get_pdu_json() for p in pdus] return Transaction(**kwargs) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 6bd703632d..87fa7f006a 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -311,10 +311,10 @@ class HttpPusher(object): ] } } - if event.type == 'm.room.member': + if event.type == 'm.room.member' and event.is_state(): d['notification']['membership'] = event.content['membership'] d['notification']['user_is_target'] = event.state_key == self.user_id - if self.hs.config.push_include_content and 'content' in event: + if self.hs.config.push_include_content and event.content: d['notification']['content'] = event.content # We no longer send aliases separately, instead, we send the human diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 2bd321d530..cf6c8b875e 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -124,7 +124,7 @@ class PushRuleEvaluatorForEvent(object): # XXX: optimisation: cache our pattern regexps if condition['key'] == 'content.body': - body = self._event["content"].get("body", None) + body = self._event.content.get("body", None) if not body: return False @@ -140,7 +140,7 @@ class PushRuleEvaluatorForEvent(object): if not display_name: return False - body = self._event["content"].get("body", None) + body = self._event.content.get("body", None) if not body: return False diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 41be5d5a1a..1688a741d1 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -28,8 +28,8 @@ ROOM_ID = "!room:blue" def dict_equals(self, other): - me = encode_canonical_json(self._event_dict) - them = encode_canonical_json(other._event_dict) + me = encode_canonical_json(self.get_pdu_json()) + them = encode_canonical_json(other.get_pdu_json()) return me == them diff --git a/tests/test_federation.py b/tests/test_federation.py index 952a0a7b51..e1a34ccffd 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -112,7 +112,7 @@ class MessageAcceptTests(unittest.TestCase): "origin_server_ts": 1, "type": "m.room.message", "origin": "test.serv", - "content": "hewwo?", + 
"content": {"body": "hewwo?"}, "auth_events": [], "prev_events": [("two:test.serv", {}), (most_recent, {})], } -- cgit 1.4.1 From 0467384d2f1bd6496b81165689662cd4d3f9deba Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Sat, 3 Nov 2018 02:28:07 +1100 Subject: Set the encoding to UTF8 in the default logconfig (#4138) --- changelog.d/4138.misc | 1 + synapse/config/logger.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/4138.misc (limited to 'synapse') diff --git a/changelog.d/4138.misc b/changelog.d/4138.misc new file mode 100644 index 0000000000..300199f8e8 --- /dev/null +++ b/changelog.d/4138.misc @@ -0,0 +1 @@ +The default logging config will now set an explicit log file encoding of UTF-8. diff --git a/synapse/config/logger.py b/synapse/config/logger.py index e9a936118d..7081868963 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -50,6 +50,7 @@ handlers: maxBytes: 104857600 backupCount: 10 filters: [context] + encoding: utf8 console: class: logging.StreamHandler formatter: precise -- cgit 1.4.1 From bc80b3f454aa9b9ca8bc710ff502b83892ac0a91 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 5 Nov 2018 13:35:15 +0000 Subject: Add helpers for getting prev and auth events (#4139) * Add helpers for getting prev and auth events This is in preparation for allowing the event format to change between room versions. --- changelog.d/4139.misc | 1 + synapse/event_auth.py | 4 +-- synapse/events/__init__.py | 18 +++++++++++++ synapse/federation/transaction_queue.py | 4 +-- synapse/handlers/federation.py | 48 ++++++++++++++++----------------- synapse/state/__init__.py | 2 +- synapse/state/v2.py | 16 +++++------ synapse/storage/event_federation.py | 4 +-- synapse/storage/events.py | 8 +++--- tests/state/test_v2.py | 2 +- 10 files changed, 62 insertions(+), 45 deletions(-) create mode 100644 changelog.d/4139.misc (limited to 'synapse') diff --git a/changelog.d/4139.misc b/changelog.d/4139.misc new file mode 100644 index 0000000000..d63d9e7003 --- /dev/null +++ b/changelog.d/4139.misc @@ -0,0 +1 @@ +Add helpers functions for getting prev and auth events of an event diff --git a/synapse/event_auth.py b/synapse/event_auth.py index d4d4474847..c81d8e6729 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -200,11 +200,11 @@ def _is_membership_change_allowed(event, auth_events): membership = event.content["membership"] # Check if this is the room creator joining: - if len(event.prev_events) == 1 and Membership.JOIN == membership: + if len(event.prev_event_ids()) == 1 and Membership.JOIN == membership: # Get room creation event: key = (EventTypes.Create, "", ) create = auth_events.get(key) - if create and event.prev_events[0][0] == create.event_id: + if create and event.prev_event_ids()[0] == create.event_id: if create.content["creator"] == event.state_key: return diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 12f1eb0a3e..84c75495d5 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -159,6 +159,24 @@ class EventBase(object): def keys(self): return six.iterkeys(self._event_dict) + def prev_event_ids(self): + """Returns the list of prev event IDs. The order matches the order + specified in the event, though there is no meaning to it. + + Returns: + list[str]: The list of event IDs of this event's prev_events + """ + return [e for e, _ in self.prev_events] + + def auth_event_ids(self): + """Returns the list of auth event IDs. 
The order matches the order + specified in the event, though there is no meaning to it. + + Returns: + list[str]: The list of event IDs of this event's auth_events + """ + return [e for e, _ in self.auth_events] + class FrozenEvent(EventBase): def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None): diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 3fdd63be95..099ace28c1 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -183,9 +183,7 @@ class TransactionQueue(object): # banned then it won't receive the event because it won't # be in the room after the ban. destinations = yield self.state.get_current_hosts_in_room( - event.room_id, latest_event_ids=[ - prev_id for prev_id, _ in event.prev_events - ], + event.room_id, latest_event_ids=event.prev_event_ids(), ) except Exception: logger.exception( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cd5b9bbb19..9ca5fd8724 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -239,7 +239,7 @@ class FederationHandler(BaseHandler): room_id, event_id, min_depth, ) - prevs = {e_id for e_id, _ in pdu.prev_events} + prevs = set(pdu.prev_event_ids()) seen = yield self.store.have_seen_events(prevs) if min_depth and pdu.depth < min_depth: @@ -607,7 +607,7 @@ class FederationHandler(BaseHandler): if e.event_id in seen_ids: continue e.internal_metadata.outlier = True - auth_ids = [e_id for e_id, _ in e.auth_events] + auth_ids = e.auth_event_ids() auth = { (e.type, e.state_key): e for e in auth_chain if e.event_id in auth_ids or e.type == EventTypes.Create @@ -726,7 +726,7 @@ class FederationHandler(BaseHandler): edges = [ ev.event_id for ev in events - if set(e_id for e_id, _ in ev.prev_events) - event_ids + if set(ev.prev_event_ids()) - event_ids ] logger.info( @@ -753,7 +753,7 @@ class FederationHandler(BaseHandler): required_auth = set( a_id for event in events + list(state_events.values()) + list(auth_events.values()) - for a_id, _ in event.auth_events + for a_id in event.auth_event_ids() ) auth_events.update({ e_id: event_map[e_id] for e_id in required_auth if e_id in event_map @@ -769,7 +769,7 @@ class FederationHandler(BaseHandler): auth_events.update(ret_events) required_auth.update( - a_id for event in ret_events.values() for a_id, _ in event.auth_events + a_id for event in ret_events.values() for a_id in event.auth_event_ids() ) missing_auth = required_auth - set(auth_events) @@ -796,7 +796,7 @@ class FederationHandler(BaseHandler): required_auth.update( a_id for event in results if event - for a_id, _ in event.auth_events + for a_id in event.auth_event_ids() ) missing_auth = required_auth - set(auth_events) @@ -816,7 +816,7 @@ class FederationHandler(BaseHandler): "auth_events": { (auth_events[a_id].type, auth_events[a_id].state_key): auth_events[a_id] - for a_id, _ in a.auth_events + for a_id in a.auth_event_ids() if a_id in auth_events } }) @@ -828,7 +828,7 @@ class FederationHandler(BaseHandler): "auth_events": { (auth_events[a_id].type, auth_events[a_id].state_key): auth_events[a_id] - for a_id, _ in event_map[e_id].auth_events + for a_id in event_map[e_id].auth_event_ids() if a_id in auth_events } }) @@ -1041,17 +1041,17 @@ class FederationHandler(BaseHandler): Raises: SynapseError if the event does not pass muster """ - if len(ev.prev_events) > 20: + if len(ev.prev_event_ids()) > 20: logger.warn("Rejecting event %s which has %i prev_events", - 
ev.event_id, len(ev.prev_events)) + ev.event_id, len(ev.prev_event_ids())) raise SynapseError( http_client.BAD_REQUEST, "Too many prev_events", ) - if len(ev.auth_events) > 10: + if len(ev.auth_event_ids()) > 10: logger.warn("Rejecting event %s which has %i auth_events", - ev.event_id, len(ev.auth_events)) + ev.event_id, len(ev.auth_event_ids())) raise SynapseError( http_client.BAD_REQUEST, "Too many auth_events", @@ -1076,7 +1076,7 @@ class FederationHandler(BaseHandler): def on_event_auth(self, event_id): event = yield self.store.get_event(event_id) auth = yield self.store.get_auth_chain( - [auth_id for auth_id, _ in event.auth_events], + [auth_id for auth_id in event.auth_event_ids()], include_given=True ) defer.returnValue([e for e in auth]) @@ -1698,7 +1698,7 @@ class FederationHandler(BaseHandler): missing_auth_events = set() for e in itertools.chain(auth_events, state, [event]): - for e_id, _ in e.auth_events: + for e_id in e.auth_event_ids(): if e_id not in event_map: missing_auth_events.add(e_id) @@ -1717,7 +1717,7 @@ class FederationHandler(BaseHandler): for e in itertools.chain(auth_events, state, [event]): auth_for_e = { (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id] - for e_id, _ in e.auth_events + for e_id in e.auth_event_ids() if e_id in event_map } if create_event: @@ -1785,10 +1785,10 @@ class FederationHandler(BaseHandler): # This is a hack to fix some old rooms where the initial join event # didn't reference the create event in its auth events. - if event.type == EventTypes.Member and not event.auth_events: - if len(event.prev_events) == 1 and event.depth < 5: + if event.type == EventTypes.Member and not event.auth_event_ids(): + if len(event.prev_event_ids()) == 1 and event.depth < 5: c = yield self.store.get_event( - event.prev_events[0][0], + event.prev_event_ids()[0], allow_none=True, ) if c and c.type == EventTypes.Create: @@ -1835,7 +1835,7 @@ class FederationHandler(BaseHandler): # Now get the current auth_chain for the event. local_auth_chain = yield self.store.get_auth_chain( - [auth_id for auth_id, _ in event.auth_events], + [auth_id for auth_id in event.auth_event_ids()], include_given=True ) @@ -1891,7 +1891,7 @@ class FederationHandler(BaseHandler): """ # Check if we have all the auth events. 
current_state = set(e.event_id for e in auth_events.values()) - event_auth_events = set(e_id for e_id, _ in event.auth_events) + event_auth_events = set(event.auth_event_ids()) if event.is_state(): event_key = (event.type, event.state_key) @@ -1935,7 +1935,7 @@ class FederationHandler(BaseHandler): continue try: - auth_ids = [e_id for e_id, _ in e.auth_events] + auth_ids = e.auth_event_ids() auth = { (e.type, e.state_key): e for e in remote_auth_chain if e.event_id in auth_ids or e.type == EventTypes.Create @@ -1956,7 +1956,7 @@ class FederationHandler(BaseHandler): pass have_events = yield self.store.get_seen_events_with_rejections( - [e_id for e_id, _ in event.auth_events] + event.auth_event_ids() ) seen_events = set(have_events.keys()) except Exception: @@ -2058,7 +2058,7 @@ class FederationHandler(BaseHandler): continue try: - auth_ids = [e_id for e_id, _ in ev.auth_events] + auth_ids = ev.auth_event_ids() auth = { (e.type, e.state_key): e for e in result["auth_chain"] @@ -2250,7 +2250,7 @@ class FederationHandler(BaseHandler): missing_remote_ids = [e.event_id for e in missing_remotes] base_remote_rejected = list(missing_remotes) for e in missing_remotes: - for e_id, _ in e.auth_events: + for e_id in e.auth_event_ids(): if e_id in missing_remote_ids: try: base_remote_rejected.remove(e) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 943d5d6bb5..70048b0c09 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -261,7 +261,7 @@ class StateHandler(object): logger.debug("calling resolve_state_groups from compute_event_context") entry = yield self.resolve_state_groups_for_events( - event.room_id, [e for e, _ in event.prev_events], + event.room_id, event.prev_event_ids(), ) prev_state_ids = entry.state diff --git a/synapse/state/v2.py b/synapse/state/v2.py index dbc9688c56..3573bb0028 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -159,7 +159,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store): event = yield _get_event(event_id, event_map, state_res_store) pl = None - for aid, _ in event.auth_events: + for aid in event.auth_event_ids(): aev = yield _get_event(aid, event_map, state_res_store) if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""): pl = aev @@ -167,7 +167,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store): if pl is None: # Couldn't find power level. 
Check if they're the creator of the room - for aid, _ in event.auth_events: + for aid in event.auth_event_ids(): aev = yield _get_event(aid, event_map, state_res_store) if (aev.type, aev.state_key) == (EventTypes.Create, ""): if aev.content.get("creator") == event.sender: @@ -299,7 +299,7 @@ def _add_event_and_auth_chain_to_graph(graph, event_id, event_map, graph.setdefault(eid, set()) event = yield _get_event(eid, event_map, state_res_store) - for aid, _ in event.auth_events: + for aid in event.auth_event_ids(): if aid in auth_diff: if aid not in graph: state.append(aid) @@ -369,7 +369,7 @@ def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store): event = event_map[event_id] auth_events = {} - for aid, _ in event.auth_events: + for aid in event.auth_event_ids(): ev = yield _get_event(aid, event_map, state_res_store) if ev.rejected_reason is None: @@ -417,9 +417,9 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, while pl: mainline.append(pl) pl_ev = yield _get_event(pl, event_map, state_res_store) - auth_events = pl_ev.auth_events + auth_events = pl_ev.auth_event_ids() pl = None - for aid, _ in auth_events: + for aid in auth_events: ev = yield _get_event(aid, event_map, state_res_store) if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""): pl = aid @@ -464,10 +464,10 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor if depth is not None: defer.returnValue(depth) - auth_events = event.auth_events + auth_events = event.auth_event_ids() event = None - for aid, _ in auth_events: + for aid in auth_events: aev = yield _get_event(aid, event_map, state_res_store) if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""): event = aev diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 3faca2a042..d3b9dea1d6 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -477,7 +477,7 @@ class EventFederationStore(EventFederationWorkerStore): "is_state": False, } for ev in events - for e_id, _ in ev.prev_events + for e_id in ev.prev_event_ids() ], ) @@ -510,7 +510,7 @@ class EventFederationStore(EventFederationWorkerStore): txn.executemany(query, [ (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False) - for ev in events for e_id, _ in ev.prev_events + for ev in events for e_id in ev.prev_event_ids() if not ev.internal_metadata.is_outlier() ]) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 919e855f3b..2047110b1d 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -416,7 +416,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore ) if len_1: all_single_prev_not_state = all( - len(event.prev_events) == 1 + len(event.prev_event_ids()) == 1 and not event.is_state() for event, ctx in ev_ctx_rm ) @@ -440,7 +440,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore # guess this by looking at the prev_events and checking # if they match the current forward extremities. 
for ev, _ in ev_ctx_rm: - prev_event_ids = set(e for e, _ in ev.prev_events) + prev_event_ids = set(ev.prev_event_ids()) if latest_event_ids == prev_event_ids: state_delta_reuse_delta_counter.inc() break @@ -551,7 +551,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore result.difference_update( e_id for event in new_events - for e_id, _ in event.prev_events + for e_id in event.prev_event_ids() ) # Finally, remove any events which are prev_events of any existing events. @@ -869,7 +869,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore "auth_id": auth_id, } for event, _ in events_and_contexts - for auth_id, _ in event.auth_events + for auth_id in event.auth_event_ids() if event.is_state() ], ) diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index d67f59b2c7..2e073a3afc 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -753,7 +753,7 @@ class TestStateResolutionStore(object): result.add(event_id) event = self.event_map[event_id] - for aid, _ in event.auth_events: + for aid in event.auth_event_ids(): stack.append(aid) return list(result) -- cgit 1.4.1 From efdcbbe46bfe39f0dd3ef508bb08c37326892adc Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 6 Nov 2018 05:53:44 +1100 Subject: Tests for user consent resource (#4140) --- changelog.d/4140.bugfix | 1 + synapse/rest/consent/consent_resource.py | 2 +- tests/rest/client/test_consent.py | 111 +++++++++++++++++++++++++++++++ tests/server.py | 20 +++++- tests/unittest.py | 12 +++- 5 files changed, 140 insertions(+), 6 deletions(-) create mode 100644 changelog.d/4140.bugfix create mode 100644 tests/rest/client/test_consent.py (limited to 'synapse') diff --git a/changelog.d/4140.bugfix b/changelog.d/4140.bugfix new file mode 100644 index 0000000000..c7e0ee229d --- /dev/null +++ b/changelog.d/4140.bugfix @@ -0,0 +1 @@ +Generating the user consent URI no longer fails on Python 3. diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 89b82b0591..c85e84b465 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -227,7 +227,7 @@ class ConsentResource(Resource): key=self._hmac_secret, msg=userid.encode('utf-8'), digestmod=sha256, - ).hexdigest() + ).hexdigest().encode('ascii') if not compare_digest(want_mac, userhmac): raise SynapseError(http_client.FORBIDDEN, "HMAC incorrect") diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py new file mode 100644 index 0000000000..df3f1cde6e --- /dev/null +++ b/tests/rest/client/test_consent.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
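+# Tests for the consent resource. The Jinja2 templates it renders are
+# throwaway files written into a temporary directory by make_homeserver.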
+ +import os + +from synapse.api.urls import ConsentURIBuilder +from synapse.rest.client.v1 import admin, login, room +from synapse.rest.consent import consent_resource + +from tests import unittest +from tests.server import render + +try: + from synapse.push.mailer import load_jinja2_templates +except Exception: + load_jinja2_templates = None + + +class ConsentResourceTestCase(unittest.HomeserverTestCase): + skip = "No Jinja installed" if not load_jinja2_templates else None + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def make_homeserver(self, reactor, clock): + + config = self.default_config() + config.user_consent_version = "1" + config.public_baseurl = "" + config.form_secret = "123abc" + + # Make some temporary templates... + temp_consent_path = self.mktemp() + os.mkdir(temp_consent_path) + os.mkdir(os.path.join(temp_consent_path, 'en')) + config.user_consent_template_dir = os.path.abspath(temp_consent_path) + + with open(os.path.join(temp_consent_path, "en/1.html"), 'w') as f: + f.write("{{version}},{{has_consented}}") + + with open(os.path.join(temp_consent_path, "en/success.html"), 'w') as f: + f.write("yay!") + + hs = self.setup_test_homeserver(config=config) + return hs + + def test_accept_consent(self): + """ + A user can use the consent form to accept the terms. + """ + uri_builder = ConsentURIBuilder(self.hs.config) + resource = consent_resource.ConsentResource(self.hs) + + # Register a user + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Fetch the consent page, to get the consent version + consent_uri = ( + uri_builder.build_user_consent_uri(user_id).replace("_matrix/", "") + + "&u=user" + ) + request, channel = self.make_request( + "GET", consent_uri, access_token=access_token, shorthand=False + ) + render(request, resource, self.reactor) + self.assertEqual(channel.code, 200) + + # Get the version from the body, and whether we've consented + version, consented = channel.result["body"].decode('ascii').split(",") + self.assertEqual(consented, "False") + + # POST to the consent page, saying we've agreed + request, channel = self.make_request( + "POST", + consent_uri + "&v=" + version, + access_token=access_token, + shorthand=False, + ) + render(request, resource, self.reactor) + self.assertEqual(channel.code, 200) + + # Fetch the consent page, to get the consent version -- it should have + # changed + request, channel = self.make_request( + "GET", consent_uri, access_token=access_token, shorthand=False + ) + render(request, resource, self.reactor) + self.assertEqual(channel.code, 200) + + # Get the version from the body, and check that it's the version we + # agreed to, and that we've consented to it. + version, consented = channel.result["body"].decode('ascii').split(",") + self.assertEqual(consented, "True") + self.assertEqual(version, "1") diff --git a/tests/server.py b/tests/server.py index cc6dbe04ac..f63f33c94f 100644 --- a/tests/server.py +++ b/tests/server.py @@ -104,10 +104,24 @@ class FakeSite: return FakeLogger() -def make_request(method, path, content=b"", access_token=None, request=SynapseRequest): +def make_request( + method, path, content=b"", access_token=None, request=SynapseRequest, shorthand=True +): """ Make a web request using the given method and path, feed it the content, and return the Request and the Channel underneath. + + Args: + method (bytes/unicode): The HTTP request method ("verb"). 
+ path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. + escaped UTF-8 & spaces and such). + content (bytes or dict): The body of the request. JSON-encoded, if + a dict. + shorthand: Whether to try and be helpful and prefix the given URL + with the usual REST API path, if it doesn't contain it. + + Returns: + A synapse.http.site.SynapseRequest. """ if not isinstance(method, bytes): method = method.encode('ascii') @@ -115,8 +129,8 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe if not isinstance(path, bytes): path = path.encode('ascii') - # Decorate it to be the full path - if not path.startswith(b"/_matrix"): + # Decorate it to be the full path, if we're using shorthand + if shorthand and not path.startswith(b"/_matrix"): path = b"/_matrix/client/r0/" + path path = path.replace(b"//", b"/") diff --git a/tests/unittest.py b/tests/unittest.py index 4d40bdb6a5..5e35c943d7 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -258,7 +258,13 @@ class HomeserverTestCase(TestCase): """ def make_request( - self, method, path, content=b"", access_token=None, request=SynapseRequest + self, + method, + path, + content=b"", + access_token=None, + request=SynapseRequest, + shorthand=True, ): """ Create a SynapseRequest at the path using the method and containing the @@ -270,6 +276,8 @@ class HomeserverTestCase(TestCase): escaped UTF-8 & spaces and such). content (bytes or dict): The body of the request. JSON-encoded, if a dict. + shorthand: Whether to try and be helpful and prefix the given URL + with the usual REST API path, if it doesn't contain it. Returns: A synapse.http.site.SynapseRequest. @@ -277,7 +285,7 @@ class HomeserverTestCase(TestCase): if isinstance(content, dict): content = json.dumps(content).encode('utf8') - return make_request(method, path, content, access_token, request) + return make_request(method, path, content, access_token, request, shorthand) def render(self, request): """ -- cgit 1.4.1 From f1087106cf637e3c108c096ff789100bcbcc461c Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Mon, 5 Nov 2018 17:59:29 -0500 Subject: handle empty backups according to latest spec proposal (#4123) fixes #4056 --- changelog.d/4123.bugfix | 1 + synapse/handlers/e2e_room_keys.py | 22 ++++++--- synapse/rest/client/v2_alpha/room_keys.py | 21 ++++++-- tests/handlers/test_e2e_room_keys.py | 79 +++++++++++++++---------------- 4 files changed, 71 insertions(+), 52 deletions(-) create mode 100644 changelog.d/4123.bugfix (limited to 'synapse') diff --git a/changelog.d/4123.bugfix b/changelog.d/4123.bugfix new file mode 100644 index 0000000000..b82bc2aad3 --- /dev/null +++ b/changelog.d/4123.bugfix @@ -0,0 +1 @@ +fix return code of empty key backups diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 5edb3cfe04..42b040375f 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -19,7 +19,7 @@ from six import iteritems from twisted.internet import defer -from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError +from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) @@ -55,6 +55,8 @@ class E2eRoomKeysHandler(object): room_id(string): room ID to get keys for, for None to get keys for all rooms session_id(string): session ID to get keys for, for None to get keys for all sessions + Raises: + NotFoundError: if the backup version does not exist Returns: A 
deferred list of dicts giving the session_data and message metadata for these room keys. @@ -63,13 +65,19 @@ class E2eRoomKeysHandler(object): # we deliberately take the lock to get keys so that changing the version # works atomically with (yield self._upload_linearizer.queue(user_id)): + # make sure the backup version exists + try: + yield self.store.get_e2e_room_keys_version_info(user_id, version) + except StoreError as e: + if e.code == 404: + raise NotFoundError("Unknown backup version") + else: + raise + results = yield self.store.get_e2e_room_keys( user_id, version, room_id, session_id ) - if results['rooms'] == {}: - raise SynapseError(404, "No room_keys found") - defer.returnValue(results) @defer.inlineCallbacks @@ -120,7 +128,7 @@ class E2eRoomKeysHandler(object): } Raises: - SynapseError: with code 404 if there are no versions defined + NotFoundError: if there are no versions defined RoomKeysVersionError: if the uploaded version is not the current version """ @@ -134,7 +142,7 @@ class E2eRoomKeysHandler(object): version_info = yield self.store.get_e2e_room_keys_version_info(user_id) except StoreError as e: if e.code == 404: - raise SynapseError(404, "Version '%s' not found" % (version,)) + raise NotFoundError("Version '%s' not found" % (version,)) else: raise @@ -148,7 +156,7 @@ class E2eRoomKeysHandler(object): raise RoomKeysVersionError(current_version=version_info['version']) except StoreError as e: if e.code == 404: - raise SynapseError(404, "Version '%s' not found" % (version,)) + raise NotFoundError("Version '%s' not found" % (version,)) else: raise diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 45b5817d8b..ab3f1bd21a 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, @@ -208,10 +208,25 @@ class RoomKeysServlet(RestServlet): user_id, version, room_id, session_id ) + # Convert room_keys to the right format to return. if session_id: - room_keys = room_keys['rooms'][room_id]['sessions'][session_id] + # If the client requests a specific session, but that session was + # not backed up, then return an M_NOT_FOUND. + if room_keys['rooms'] == {}: + raise NotFoundError("No room_keys found") + else: + room_keys = room_keys['rooms'][room_id]['sessions'][session_id] elif room_id: - room_keys = room_keys['rooms'][room_id] + # If the client requests all sessions from a room, but no sessions + # are found, then return an empty result rather than an error, so + # that clients don't have to handle an error condition, and an + # empty result is valid. (Similarly if the client requests all + # sessions from the backup, but in that case, room_keys is already + # in the right format, so we don't need to do anything about it.) 
+ if room_keys['rooms'] == {}: + room_keys = {'sessions': {}} + else: + room_keys = room_keys['rooms'][room_id] defer.returnValue((200, room_keys)) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 9e08eac0a5..c8994f416e 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -169,8 +169,8 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): self.assertEqual(res, 404) @defer.inlineCallbacks - def test_get_missing_room_keys(self): - """Check that we get a 404 on querying missing room_keys + def test_get_missing_backup(self): + """Check that we get a 404 on querying missing backup """ res = None try: @@ -179,19 +179,20 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = e.code self.assertEqual(res, 404) - # check we also get a 404 even if the version is valid + @defer.inlineCallbacks + def test_get_missing_room_keys(self): + """Check we get an empty response from an empty backup + """ version = yield self.handler.create_version(self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }) self.assertEqual(version, "1") - res = None - try: - yield self.handler.get_room_keys(self.local_user, version) - except errors.SynapseError as e: - res = e.code - self.assertEqual(res, 404) + res = yield self.handler.get_room_keys(self.local_user, version) + self.assertDictEqual(res, { + "rooms": {} + }) # TODO: test the locking semantics when uploading room_keys, # although this is probably best done in sytest @@ -345,17 +346,15 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # check for bulk-delete yield self.handler.upload_room_keys(self.local_user, version, room_keys) yield self.handler.delete_room_keys(self.local_user, version) - res = None - try: - yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", - ) - except errors.SynapseError as e: - res = e.code - self.assertEqual(res, 404) + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + self.assertDictEqual(res, { + "rooms": {} + }) # check for bulk-delete per room yield self.handler.upload_room_keys(self.local_user, version, room_keys) @@ -364,17 +363,15 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): version, room_id="!abc:matrix.org", ) - res = None - try: - yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", - ) - except errors.SynapseError as e: - res = e.code - self.assertEqual(res, 404) + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + self.assertDictEqual(res, { + "rooms": {} + }) # check for bulk-delete per session yield self.handler.upload_room_keys(self.local_user, version, room_keys) @@ -384,14 +381,12 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): room_id="!abc:matrix.org", session_id="c0ff33", ) - res = None - try: - yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", - ) - except errors.SynapseError as e: - res = e.code - self.assertEqual(res, 404) + res = yield self.handler.get_room_keys( + self.local_user, + version, + room_id="!abc:matrix.org", + session_id="c0ff33", + ) + self.assertDictEqual(res, { + "rooms": {} + }) -- cgit 1.4.1 From 0f5e51f726756318f355d988856730a9930e2d2f Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 6 Nov 
2018 03:32:34 -0700 Subject: Add config variables for enabling terms auth and the policy name (#4142) So people can still collect consent the old way if they want to. --- changelog.d/4004.feature | 2 +- changelog.d/4133.feature | 2 +- changelog.d/4142.feature | 1 + docs/consent_tracking.md | 40 ++++++++++++++++++++++++++++---- synapse/config/consent_config.py | 18 ++++++++++++++ synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/register.py | 2 +- synapse/rest/consent/consent_resource.py | 2 +- tests/test_terms_auth.py | 5 ++-- tests/utils.py | 2 ++ 10 files changed, 65 insertions(+), 11 deletions(-) create mode 100644 changelog.d/4142.feature (limited to 'synapse') diff --git a/changelog.d/4004.feature b/changelog.d/4004.feature index ef5cdaf5ec..89975f4c6e 100644 --- a/changelog.d/4004.feature +++ b/changelog.d/4004.feature @@ -1 +1 @@ -Add `m.login.terms` to the registration flow when consent tracking is enabled. **This makes the template arguments conditionally optional on a new `public_version` variable - update your privacy templates to support this.** +Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. diff --git a/changelog.d/4133.feature b/changelog.d/4133.feature index ef5cdaf5ec..89975f4c6e 100644 --- a/changelog.d/4133.feature +++ b/changelog.d/4133.feature @@ -1 +1 @@ -Add `m.login.terms` to the registration flow when consent tracking is enabled. **This makes the template arguments conditionally optional on a new `public_version` variable - update your privacy templates to support this.** +Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. diff --git a/changelog.d/4142.feature b/changelog.d/4142.feature new file mode 100644 index 0000000000..89975f4c6e --- /dev/null +++ b/changelog.d/4142.feature @@ -0,0 +1 @@ +Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. diff --git a/docs/consent_tracking.md b/docs/consent_tracking.md index 3634d13d4f..c586b5f0b6 100644 --- a/docs/consent_tracking.md +++ b/docs/consent_tracking.md @@ -81,9 +81,40 @@ should be a matter of `pip install Jinja2`. On debian, try `apt-get install python-jinja2`. Once this is complete, and the server has been restarted, try visiting -`https:///_matrix/consent`. If correctly configured, you should see a -default policy document. It is now possible to manually construct URIs where -users can give their consent. +`https:///_matrix/consent`. If correctly configured, this should give +an error "Missing string query parameter 'u'". It is now possible to manually +construct URIs where users can give their consent. + +### Enabling consent tracking at registration + +1. Add the following to your configuration: + + ```yaml + user_consent: + require_at_registration: true + policy_name: "Privacy Policy" # or whatever you'd like to call the policy + ``` + +2. In your consent templates, make use of the `public_version` variable to + see if an unauthenticated user is viewing the page. This is typically + wrapped around the form that would be used to actually agree to the document: + + ``` + {% if not public_version %} + +
<form method="post" action="consent">
+       <input type="hidden" name="v" value="{{version}}"/>
+       <input type="hidden" name="u" value="{{user}}"/>
+       <input type="hidden" name="h" value="{{userhmac}}"/>
+       <input type="submit" value="Sure thing!"/>
+     </form>
+ {% endif %} + ``` + +3. Restart Synapse to apply the changes. + +Visiting `https:///_matrix/consent` should now give you a view of the privacy +document. This is what users will be able to see when registering for accounts. ### Constructing the consent URI @@ -108,7 +139,8 @@ query parameters: Note that not providing a `u` parameter will be interpreted as wanting to view the document from an unauthenticated perspective, such as prior to registration. -Therefore, the `h` parameter is not required in this scenario. +Therefore, the `h` parameter is not required in this scenario. To enable this +behaviour, set `require_at_registration` to `true` in your `user_consent` config. Sending users a server notice asking them to agree to the policy diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index e22c731aad..f193a090ae 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -42,6 +42,14 @@ DEFAULT_CONFIG = """\ # until the user consents to the privacy policy. The value of the setting is # used as the text of the error. # +# 'require_at_registration', if enabled, will add a step to the registration +# process, similar to how captcha works. Users will be required to accept the +# policy before their account is created. +# +# 'policy_name' is the display name of the policy users will see when registering +# for an account. Has no effect unless `require_at_registration` is enabled. +# Defaults to "Privacy Policy". +# # user_consent: # template_dir: res/templates/privacy # version: 1.0 @@ -54,6 +62,8 @@ DEFAULT_CONFIG = """\ # block_events_error: >- # To continue using this homeserver you must review and agree to the # terms and conditions at %(consent_uri)s +# require_at_registration: False +# policy_name: Privacy Policy # """ @@ -67,6 +77,8 @@ class ConsentConfig(Config): self.user_consent_server_notice_content = None self.user_consent_server_notice_to_guests = False self.block_events_without_consent_error = None + self.user_consent_at_registration = False + self.user_consent_policy_name = "Privacy Policy" def read_config(self, config): consent_config = config.get("user_consent") @@ -83,6 +95,12 @@ class ConsentConfig(Config): self.user_consent_server_notice_to_guests = bool(consent_config.get( "send_server_notice_to_guests", False, )) + self.user_consent_at_registration = bool(consent_config.get( + "require_at_registration", False, + )) + self.user_consent_policy_name = consent_config.get( + "policy_name", "Privacy Policy", + ) def default_config(self, **kwargs): return DEFAULT_CONFIG diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 85fc1fc525..a958c45271 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -472,7 +472,7 @@ class AuthHandler(BaseHandler): "privacy_policy": { "version": self.hs.config.user_consent_version, "en": { - "name": "Privacy Policy", + "name": self.hs.config.user_consent_policy_name, "url": "%s/_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, self.hs.config.user_consent_version, diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index c5214330ad..0515715f7c 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -360,7 +360,7 @@ class RegisterRestServlet(RestServlet): ]) # Append m.login.terms to all flows if we're requiring consent - if self.hs.config.block_events_without_consent_error is not None: + if self.hs.config.user_consent_at_registration: new_flows = [] for 
flow in flows: flow.append(LoginType.TERMS) diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index c85e84b465..e0f7de5d5c 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -142,7 +142,7 @@ class ConsentResource(Resource): userhmac = None has_consented = False public_version = username == "" - if not public_version: + if not public_version or not self.hs.config.user_consent_at_registration: userhmac = parse_string(request, "h", required=True, encoding=None) self._check_hash(username, userhmac) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 7deab5266f..0b71c6feb9 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -42,7 +42,8 @@ class TermsTestCase(unittest.HomeserverTestCase): hs.config.enable_registration_captcha = False def test_ui_auth(self): - self.hs.config.block_events_without_consent_error = True + self.hs.config.user_consent_at_registration = True + self.hs.config.user_consent_policy_name = "My Cool Privacy Policy" self.hs.config.public_baseurl = "https://example.org" self.hs.config.user_consent_version = "1.0" @@ -66,7 +67,7 @@ class TermsTestCase(unittest.HomeserverTestCase): "policies": { "privacy_policy": { "en": { - "name": "Privacy Policy", + "name": "My Cool Privacy Policy", "url": "https://example.org/_matrix/consent?v=1.0", }, "version": "1.0" diff --git a/tests/utils.py b/tests/utils.py index 565bb60d08..67ab916f30 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -123,6 +123,8 @@ def default_config(name): config.user_directory_search_all_users = False config.user_consent_server_notice_content = None config.block_events_without_consent_error = None + config.user_consent_at_registration = False + config.user_consent_policy_name = "Privacy Policy" config.media_storage_providers = [] config.autocreate_auto_join_rooms = True config.auto_join_rooms = [] -- cgit 1.4.1 From b3708830b847245a5d559a099fcaf738250b7cbe Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 8 Nov 2018 01:37:43 +1100 Subject: Fix URL preview bugs (type error when loading cache from db, content-type including quotes) (#4157) --- changelog.d/4157.bugfix | 1 + synapse/http/server.py | 8 +- synapse/rest/media/v1/preview_url_resource.py | 22 +++- tests/rest/media/v1/test_url_preview.py | 164 ++++++++++++++++++++++++++ tests/server.py | 2 + 5 files changed, 187 insertions(+), 10 deletions(-) create mode 100644 changelog.d/4157.bugfix create mode 100644 tests/rest/media/v1/test_url_preview.py (limited to 'synapse') diff --git a/changelog.d/4157.bugfix b/changelog.d/4157.bugfix new file mode 100644 index 0000000000..265514c3af --- /dev/null +++ b/changelog.d/4157.bugfix @@ -0,0 +1 @@ +Loading URL previews from the DB cache on Postgres will no longer cause Unicode type errors when responding to the request, and URL previews will no longer fail if the remote server returns a Content-Type header with the chartype in quotes. \ No newline at end of file diff --git a/synapse/http/server.py b/synapse/http/server.py index b4b25cab19..6a427d96a6 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -468,13 +468,13 @@ def set_cors_headers(request): Args: request (twisted.web.http.Request): The http request to add CORs to. 
""" - request.setHeader("Access-Control-Allow-Origin", "*") + request.setHeader(b"Access-Control-Allow-Origin", b"*") request.setHeader( - "Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS" + b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS" ) request.setHeader( - "Access-Control-Allow-Headers", - "Origin, X-Requested-With, Content-Type, Accept, Authorization" + b"Access-Control-Allow-Headers", + b"Origin, X-Requested-With, Content-Type, Accept, Authorization" ) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 1a7bfd6b56..91d1dafe64 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import cgi import datetime import errno @@ -24,6 +25,7 @@ import shutil import sys import traceback +import six from six import string_types from six.moves import urllib_parse as urlparse @@ -98,7 +100,7 @@ class PreviewUrlResource(Resource): # XXX: if get_user_by_req fails, what should we do in an async render? requester = yield self.auth.get_user_by_req(request) url = parse_string(request, "url") - if "ts" in request.args: + if b"ts" in request.args: ts = parse_integer(request, "ts") else: ts = self.clock.time_msec() @@ -180,7 +182,12 @@ class PreviewUrlResource(Resource): cache_result["expires_ts"] > ts and cache_result["response_code"] / 100 == 2 ): - defer.returnValue(cache_result["og"]) + # It may be stored as text in the database, not as bytes (such as + # PostgreSQL). If so, encode it back before handing it on. + og = cache_result["og"] + if isinstance(og, six.text_type): + og = og.encode('utf8') + defer.returnValue(og) return media_info = yield self._download_url(url, user) @@ -213,14 +220,17 @@ class PreviewUrlResource(Resource): elif _is_html(media_info['media_type']): # TODO: somehow stop a big HTML tree from exploding synapse's RAM - file = open(media_info['filename']) - body = file.read() - file.close() + with open(media_info['filename'], 'rb') as file: + body = file.read() # clobber the encoding from the content-type, or default to utf-8 # XXX: this overrides any or XML charset headers in the body # which may pose problems, but so far seems to work okay. - match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I) + match = re.match( + r'.*; *charset="?(.*?)"?(;|$)', + media_info['media_type'], + re.I + ) encoding = match.group(1) if match else "utf-8" og = decode_and_calc_og(body, media_info['uri'], encoding) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py new file mode 100644 index 0000000000..29579cf091 --- /dev/null +++ b/tests/rest/media/v1/test_url_preview.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from mock import Mock + +from twisted.internet.defer import Deferred + +from synapse.config.repository import MediaStorageProviderConfig +from synapse.util.module_loader import load_module + +from tests import unittest + + +class URLPreviewTests(unittest.HomeserverTestCase): + + hijack_auth = True + user_id = "@test:user" + + def make_homeserver(self, reactor, clock): + + self.storage_path = self.mktemp() + os.mkdir(self.storage_path) + + config = self.default_config() + config.url_preview_enabled = True + config.max_spider_size = 9999999 + config.url_preview_url_blacklist = [] + config.media_store_path = self.storage_path + + provider_config = { + "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", + "store_local": True, + "store_synchronous": False, + "store_remote": True, + "config": {"directory": self.storage_path}, + } + + loaded = list(load_module(provider_config)) + [ + MediaStorageProviderConfig(False, False, False) + ] + + config.media_storage_providers = [loaded] + + hs = self.setup_test_homeserver(config=config) + + return hs + + def prepare(self, reactor, clock, hs): + + self.fetches = [] + + def get_file(url, output_stream, max_size): + """ + Returns tuple[int,dict,str,int] of file length, response headers, + absolute URI, and response code. + """ + + def write_to(r): + data, response = r + output_stream.write(data) + return response + + d = Deferred() + d.addCallback(write_to) + self.fetches.append((d, url)) + return d + + client = Mock() + client.get_file = get_file + + self.media_repo = hs.get_media_repository_resource() + preview_url = self.media_repo.children[b'preview_url'] + preview_url.client = client + self.preview_url = preview_url + + def test_cache_returns_correct_type(self): + + request, channel = self.make_request( + "GET", "url_preview?url=matrix.org", shorthand=False + ) + request.render(self.preview_url) + self.pump() + + # We've made one fetch + self.assertEqual(len(self.fetches), 1) + + end_content = ( + b'' + b'' + b'' + b'' + ) + + self.fetches[0][0].callback( + ( + end_content, + ( + len(end_content), + { + b"Content-Length": [b"%d" % (len(end_content))], + b"Content-Type": [b'text/html; charset="utf8"'], + }, + "https://example.com", + 200, + ), + ) + ) + + self.pump() + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} + ) + + # Check the cache returns the correct response + request, channel = self.make_request( + "GET", "url_preview?url=matrix.org", shorthand=False + ) + request.render(self.preview_url) + self.pump() + + # Only one fetch, still, since we'll lean on the cache + self.assertEqual(len(self.fetches), 1) + + # Check the cache response has the same content + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} + ) + + # Clear the in-memory cache + self.assertIn("matrix.org", self.preview_url._cache) + self.preview_url._cache.pop("matrix.org") + self.assertNotIn("matrix.org", self.preview_url._cache) + + # Check the database cache returns the correct response + request, channel = self.make_request( + "GET", "url_preview?url=matrix.org", shorthand=False + ) + request.render(self.preview_url) + self.pump() + + # Only one fetch, still, since we'll lean on the cache + self.assertEqual(len(self.fetches), 1) + + # Check the cache response has the same content + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body, {"og:title": "~matrix~", 
"og:description": "hi"} + ) diff --git a/tests/server.py b/tests/server.py index 984cfe26d4..7919a1f124 100644 --- a/tests/server.py +++ b/tests/server.py @@ -57,6 +57,8 @@ class FakeChannel(object): self.result["headers"] = headers def write(self, content): + assert isinstance(content, bytes), "Should be bytes! " + repr(content) + if "body" not in self.result: self.result["body"] = b"" -- cgit 1.4.1 From 2b075fb03a704865a759693989127cd5800e7fe6 Mon Sep 17 00:00:00 2001 From: hera Date: Thu, 8 Nov 2018 11:03:08 +0000 Subject: Fix encoding error for consent form on python3 The form was rendering this as "b'01234....'". -- richvdh --- synapse/rest/consent/consent_resource.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'synapse') diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index e0f7de5d5c..8009b7ff1c 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -160,7 +160,9 @@ class ConsentResource(Resource): try: self._render_template( request, "%s.html" % (version,), - user=username, userhmac=userhmac, version=version, + user=username, + userhmac=userhmac.decode('ascii'), + version=version, has_consented=has_consented, public_version=public_version, ) except TemplateNotFound: -- cgit 1.4.1 From 0a1fc5297148224b33013ad6b3047933ede03e15 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Nov 2018 11:12:29 +0000 Subject: fix parse_string docstring --- synapse/http/servlet.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'synapse') diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index a1e4b88e6d..528125e737 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -121,16 +121,15 @@ def parse_string(request, name, default=None, required=False, Args: request: the twisted HTTP request. - name (bytes/unicode): the name of the query parameter. - default (bytes/unicode|None): value to use if the parameter is absent, + name (bytes|unicode): the name of the query parameter. + default (bytes|unicode|None): value to use if the parameter is absent, defaults to None. Must be bytes if encoding is None. required (bool): whether to raise a 400 SynapseError if the parameter is absent, defaults to False. - allowed_values (list[bytes/unicode]): List of allowed values for the + allowed_values (list[bytes|unicode]): List of allowed values for the string, or None if any value is allowed, defaults to None. Must be the same type as name, if given. - encoding: The encoding to decode the name to, and decode the string - content with. + encoding (str|None): The encoding to decode the string content with. Returns: bytes/unicode|None: A string value or the default. Unicode if encoding -- cgit 1.4.1 From b1a22b24ab7532da993e673f353dd87eeef49151 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Nov 2018 12:14:20 +0000 Subject: Fix noop checks when updating device keys Clients often reupload their device keys (for some reason) so its important for the server to check for no-ops before sending out device list update notifications. The check is broken in python 3 due to the fact comparing bytes and unicode always fails, and that we write bytes to the DB but get unicode when we read. 
---
 synapse/storage/end_to_end_keys.py | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'synapse')

diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 1f1721e820..29281630c0 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -40,6 +40,11 @@ class EndToEndKeyStore(SQLBaseStore):
             allow_none=True,
         )
 
+        if old_key_json and not isinstance(old_key_json, bytes):
+            # In py3 we need old_key_json to match new_key_json type. The DB
+            # returns unicode while encode_canonical_json returns bytes
+            old_key_json = old_key_json.encode("utf-8")
+
         new_key_json = encode_canonical_json(device_keys)
         if old_key_json == new_key_json:
             return False
-- 
cgit 1.4.1


From 5ebed186926eee77844730f5270a926417a0be09 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 8 Nov 2018 12:33:13 +0000
Subject: Let's convert bytes to unicode instead

---
 synapse/storage/end_to_end_keys.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

(limited to 'synapse')

diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 29281630c0..2a0f6cfca9 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -40,12 +40,10 @@ class EndToEndKeyStore(SQLBaseStore):
             allow_none=True,
         )
 
-        if old_key_json and not isinstance(old_key_json, bytes):
-            # In py3 we need old_key_json to match new_key_json type. The DB
-            # returns unicode while encode_canonical_json returns bytes
-            old_key_json = old_key_json.encode("utf-8")
+        # In py3 we need old_key_json to match new_key_json type. The DB
+        # returns unicode while encode_canonical_json returns bytes.
+        new_key_json = encode_canonical_json(device_keys).decode("utf-8")
 
-        new_key_json = encode_canonical_json(device_keys)
         if old_key_json == new_key_json:
             return False
-- 
cgit 1.4.1
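The no-op check these last two patches converge on can be sketched as a
self-contained function (a hypothetical helper for illustration only; in
synapse the logic lives inline in the storage method shown above):

    from canonicaljson import encode_canonical_json

    def device_keys_unchanged(old_key_json, device_keys):
        # old_key_json comes back from the DB as unicode (str on py3),
        # while encode_canonical_json returns bytes, so decode the fresh
        # canonical JSON before comparing. A genuine no-op then compares
        # equal and no device list update notification is sent.
        new_key_json = encode_canonical_json(device_keys).decode("utf-8")
        return old_key_json == new_key_json

Used on the example from the commit message, the comparison now behaves:

    >>> device_keys_unchanged(u'{"a":1}', {"a": 1})
    True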